-rw-r--r--Documentation/DocBook/mac80211.tmpl2
-rw-r--r--Documentation/feature-removal-schedule.txt21
-rw-r--r--Documentation/isdn/INTERFACE.CAPI9
-rw-r--r--Documentation/networking/ip-sysctl.txt34
-rwxr-xr-xDocumentation/networking/ixgbevf.txt90
-rw-r--r--Documentation/networking/regulatory.txt24
-rw-r--r--Documentation/powerpc/dts-bindings/fsl/can.txt53
-rw-r--r--Documentation/powerpc/dts-bindings/fsl/mpc5200.txt9
-rw-r--r--MAINTAINERS16
-rw-r--r--arch/ia64/kvm/Kconfig1
-rw-r--r--arch/powerpc/boot/dts/mpc8569mds.dts47
-rw-r--r--arch/powerpc/kvm/Kconfig1
-rw-r--r--arch/powerpc/platforms/85xx/mpc85xx_mds.c24
-rw-r--r--arch/s390/kvm/Kconfig1
-rw-r--r--arch/x86/kvm/Kconfig1
-rw-r--r--drivers/Makefile1
-rw-r--r--drivers/atm/fore200e.c11
-rw-r--r--drivers/atm/idt77252.c5
-rw-r--r--drivers/atm/lanai.c14
-rw-r--r--drivers/atm/nicstar.c4
-rw-r--r--drivers/firmware/iscsi_ibft.c6
-rw-r--r--drivers/infiniband/hw/cxgb3/iwch_cm.c11
-rw-r--r--drivers/isdn/capi/capi.c99
-rw-r--r--drivers/isdn/capi/capidrv.c55
-rw-r--r--drivers/isdn/capi/kcapi.c8
-rw-r--r--drivers/isdn/gigaset/capi.c75
-rw-r--r--drivers/isdn/hardware/avm/avmcard.h6
-rw-r--r--drivers/isdn/hardware/avm/b1.c54
-rw-r--r--drivers/isdn/hardware/avm/b1dma.c71
-rw-r--r--drivers/isdn/hardware/avm/b1isa.c2
-rw-r--r--drivers/isdn/hardware/avm/b1pci.c4
-rw-r--r--drivers/isdn/hardware/avm/b1pcmcia.c2
-rw-r--r--drivers/isdn/hardware/avm/c4.c53
-rw-r--r--drivers/isdn/hardware/avm/t1isa.c2
-rw-r--r--drivers/isdn/hardware/avm/t1pci.c2
-rw-r--r--drivers/isdn/hardware/eicon/capimain.c40
-rw-r--r--drivers/isdn/hardware/eicon/diva_didd.c45
-rw-r--r--drivers/isdn/hardware/eicon/divasi.c48
-rw-r--r--drivers/isdn/hardware/eicon/divasproc.c198
-rw-r--r--drivers/isdn/hysdn/hycapi.c56
-rw-r--r--drivers/media/dvb/dvb-core/dvb_net.c7
-rw-r--r--drivers/message/i2o/i2o_proc.c11
-rw-r--r--drivers/misc/iwmc3200top/fw-download.c50
-rw-r--r--drivers/misc/iwmc3200top/iwmc3200top.h4
-rw-r--r--drivers/misc/iwmc3200top/log.h31
-rw-r--r--drivers/misc/iwmc3200top/main.c59
-rw-r--r--drivers/net/3c59x.c2
-rw-r--r--drivers/net/8139cp.c2
-rw-r--r--drivers/net/8139too.c2
-rw-r--r--drivers/net/Kconfig29
-rw-r--r--drivers/net/Makefile2
-rw-r--r--drivers/net/acenic.c2
-rw-r--r--drivers/net/amd8111e.c5
-rw-r--r--drivers/net/arcnet/com20020-pci.c2
-rw-r--r--drivers/net/ariadne.c2
-rw-r--r--drivers/net/arm/ep93xx_eth.c140
-rw-r--r--drivers/net/atl1c/atl1c_main.c9
-rw-r--r--drivers/net/atl1e/atl1e_main.c7
-rw-r--r--drivers/net/atlx/atl1.c2
-rw-r--r--drivers/net/atlx/atl2.c2
-rw-r--r--drivers/net/b44.c2
-rw-r--r--drivers/net/benet/be_cmds.c30
-rw-r--r--drivers/net/benet/be_cmds.h17
-rw-r--r--drivers/net/benet/be_ethtool.c63
-rw-r--r--drivers/net/bnx2.c133
-rw-r--r--drivers/net/bnx2.h1
-rw-r--r--drivers/net/bnx2x_main.c6
-rw-r--r--drivers/net/bonding/bond_main.c23
-rw-r--r--drivers/net/can/at91_can.c4
-rw-r--r--drivers/net/can/bfin_can.c4
-rw-r--r--drivers/net/can/dev.c2
-rw-r--r--drivers/net/can/mcp251x.c17
-rw-r--r--drivers/net/can/mscan/Kconfig7
-rw-r--r--drivers/net/can/mscan/mpc5xxx_can.c248
-rw-r--r--drivers/net/can/mscan/mscan.c58
-rw-r--r--drivers/net/can/mscan/mscan.h86
-rw-r--r--drivers/net/can/sja1000/Kconfig12
-rw-r--r--drivers/net/can/sja1000/Makefile1
-rw-r--r--drivers/net/can/sja1000/ems_pci.c2
-rw-r--r--drivers/net/can/sja1000/kvaser_pci.c2
-rw-r--r--drivers/net/can/sja1000/plx_pci.c472
-rw-r--r--drivers/net/can/sja1000/sja1000.c4
-rw-r--r--drivers/net/can/ti_hecc.c5
-rw-r--r--drivers/net/can/usb/ems_usb.c4
-rw-r--r--drivers/net/can/vcan.c12
-rw-r--r--drivers/net/cassini.c4
-rw-r--r--drivers/net/chelsio/common.h2
-rw-r--r--drivers/net/chelsio/subr.c2
-rw-r--r--drivers/net/cxgb3/cxgb3_main.c2
-rw-r--r--drivers/net/cxgb3/cxgb3_offload.c2
-rw-r--r--drivers/net/cxgb3/sge.c16
-rw-r--r--drivers/net/defxx.c9
-rw-r--r--drivers/net/dl2k.h2
-rw-r--r--drivers/net/e100.c2
-rw-r--r--drivers/net/e1000/e1000_main.c6
-rw-r--r--drivers/net/e1000e/82571.c68
-rw-r--r--drivers/net/e1000e/defines.h2
-rw-r--r--drivers/net/e1000e/e1000.h17
-rw-r--r--drivers/net/e1000e/es2lan.c32
-rw-r--r--drivers/net/e1000e/hw.h12
-rw-r--r--drivers/net/e1000e/ich8lan.c1
-rw-r--r--drivers/net/e1000e/lib.c230
-rw-r--r--drivers/net/e1000e/netdev.c24
-rw-r--r--drivers/net/enic/enic.h5
-rw-r--r--drivers/net/enic/enic_main.c194
-rw-r--r--drivers/net/enic/enic_res.c16
-rw-r--r--drivers/net/enic/vnic_dev.c1
-rw-r--r--drivers/net/enic/vnic_enet.h5
-rw-r--r--drivers/net/enic/vnic_intr.c8
-rw-r--r--drivers/net/enic/vnic_intr.h3
-rw-r--r--drivers/net/enic/vnic_nic.h12
-rw-r--r--drivers/net/epic100.c2
-rw-r--r--drivers/net/ethoc.c8
-rw-r--r--drivers/net/fealnx.c2
-rw-r--r--drivers/net/forcedeth.c2
-rw-r--r--drivers/net/hamachi.c2
-rw-r--r--drivers/net/hp100.c2
-rw-r--r--drivers/net/igb/igb_main.c18
-rw-r--r--drivers/net/igbvf/netdev.c15
-rw-r--r--drivers/net/ioc3-eth.c2
-rw-r--r--drivers/net/ipg.c2
-rw-r--r--drivers/net/irda/Kconfig10
-rw-r--r--drivers/net/irda/Makefile1
-rw-r--r--drivers/net/irda/donauboe.c2
-rw-r--r--drivers/net/irda/sh_sir.c823
-rw-r--r--drivers/net/irda/via-ircc.c2
-rw-r--r--drivers/net/irda/vlsi_ir.c2
-rw-r--r--drivers/net/ixgb/ixgb_main.c2
-rw-r--r--drivers/net/ixgbe/Makefile3
-rw-r--r--drivers/net/ixgbe/ixgbe.h30
-rw-r--r--drivers/net/ixgbe/ixgbe_82599.c132
-rw-r--r--drivers/net/ixgbe/ixgbe_common.c19
-rw-r--r--drivers/net/ixgbe/ixgbe_common.h2
-rw-r--r--drivers/net/ixgbe/ixgbe_ethtool.c11
-rw-r--r--drivers/net/ixgbe/ixgbe_main.c308
-rw-r--r--drivers/net/ixgbe/ixgbe_mbx.c479
-rw-r--r--drivers/net/ixgbe/ixgbe_mbx.h96
-rw-r--r--drivers/net/ixgbe/ixgbe_sriov.c362
-rw-r--r--drivers/net/ixgbe/ixgbe_sriov.h47
-rw-r--r--drivers/net/ixgbe/ixgbe_type.h57
-rw-r--r--drivers/net/ixgbevf/Makefile38
-rw-r--r--drivers/net/ixgbevf/defines.h292
-rw-r--r--drivers/net/ixgbevf/ethtool.c716
-rw-r--r--drivers/net/ixgbevf/ixgbevf.h318
-rw-r--r--drivers/net/ixgbevf/ixgbevf_main.c3578
-rw-r--r--drivers/net/ixgbevf/mbx.c341
-rw-r--r--drivers/net/ixgbevf/mbx.h100
-rw-r--r--drivers/net/ixgbevf/regs.h85
-rw-r--r--drivers/net/ixgbevf/vf.c387
-rw-r--r--drivers/net/ixgbevf/vf.h168
-rw-r--r--drivers/net/jme.c2
-rw-r--r--drivers/net/lib82596.c8
-rw-r--r--drivers/net/mac8390.c632
-rw-r--r--drivers/net/macvlan.c2
-rw-r--r--drivers/net/meth.c3
-rw-r--r--drivers/net/mlx4/main.c2
-rw-r--r--drivers/net/mv643xx_eth.c3
-rw-r--r--drivers/net/myri10ge/myri10ge.c2
-rw-r--r--drivers/net/myri_sbus.c6
-rw-r--r--drivers/net/natsemi.c2
-rw-r--r--drivers/net/ne2k-pci.c2
-rw-r--r--drivers/net/netxen/Makefile2
-rw-r--r--drivers/net/netxen/netxen_nic.h8
-rw-r--r--drivers/net/netxen/netxen_nic_ctx.c2
-rw-r--r--drivers/net/netxen/netxen_nic_ethtool.c2
-rw-r--r--drivers/net/netxen/netxen_nic_hdr.h5
-rw-r--r--drivers/net/netxen/netxen_nic_hw.c19
-rw-r--r--drivers/net/netxen/netxen_nic_hw.h2
-rw-r--r--drivers/net/netxen/netxen_nic_init.c5
-rw-r--r--drivers/net/netxen/netxen_nic_main.c213
-rw-r--r--drivers/net/niu.c6
-rw-r--r--drivers/net/ns83820.c2
-rw-r--r--drivers/net/octeon/octeon_mgmt.c7
-rw-r--r--drivers/net/pasemi_mac.c2
-rw-r--r--drivers/net/pci-skeleton.c2
-rw-r--r--drivers/net/pcmcia/axnet_cs.c3
-rw-r--r--drivers/net/pcnet32.c2
-rw-r--r--drivers/net/phy/marvell.c38
-rw-r--r--drivers/net/phy/smsc.c21
-rw-r--r--drivers/net/ppp_generic.c122
-rw-r--r--drivers/net/qla3xxx.c3
-rw-r--r--drivers/net/qlcnic/Makefile8
-rw-r--r--drivers/net/qlcnic/qlcnic.h1126
-rw-r--r--drivers/net/qlcnic/qlcnic_ctx.c534
-rw-r--r--drivers/net/qlcnic/qlcnic_ethtool.c1015
-rw-r--r--drivers/net/qlcnic/qlcnic_hdr.h937
-rw-r--r--drivers/net/qlcnic/qlcnic_hw.c1275
-rw-r--r--drivers/net/qlcnic/qlcnic_init.c1541
-rw-r--r--drivers/net/qlcnic/qlcnic_main.c2720
-rw-r--r--drivers/net/qlge/qlge.h434
-rw-r--r--drivers/net/qlge/qlge_dbg.c1171
-rw-r--r--drivers/net/qlge/qlge_main.c353
-rw-r--r--drivers/net/qlge/qlge_mpi.c165
-rw-r--r--drivers/net/r6040.c2
-rw-r--r--drivers/net/r8169.c11
-rw-r--r--drivers/net/rrunner.c2
-rw-r--r--drivers/net/s2io.c2
-rw-r--r--drivers/net/sc92031.c2
-rw-r--r--drivers/net/sfc/efx.c2
-rw-r--r--drivers/net/sfc/efx.h2
-rw-r--r--drivers/net/sfc/ethtool.c10
-rw-r--r--drivers/net/sfc/falcon.c6
-rw-r--r--drivers/net/sfc/mcdi.c109
-rw-r--r--drivers/net/sfc/mcdi.h1
-rw-r--r--drivers/net/sfc/mcdi_pcol.h202
-rw-r--r--drivers/net/sfc/mcdi_phy.c36
-rw-r--r--drivers/net/sfc/mdio_10g.c24
-rw-r--r--drivers/net/sfc/mdio_10g.h3
-rw-r--r--drivers/net/sfc/net_driver.h17
-rw-r--r--drivers/net/sfc/nic.c13
-rw-r--r--drivers/net/sfc/qt202x_phy.c1
-rw-r--r--drivers/net/sfc/selftest.c42
-rw-r--r--drivers/net/sfc/selftest.h4
-rw-r--r--drivers/net/sfc/siena.c16
-rw-r--r--drivers/net/sfc/tenxpress.c2
-rw-r--r--drivers/net/sh_eth.c10
-rw-r--r--drivers/net/sis190.c2
-rw-r--r--drivers/net/sis900.c2
-rw-r--r--drivers/net/skfp/skfddi.c21
-rw-r--r--drivers/net/skge.c2
-rw-r--r--drivers/net/sky2.c96
-rw-r--r--drivers/net/sky2.h2
-rw-r--r--drivers/net/smc911x.c6
-rw-r--r--drivers/net/smsc9420.c2
-rw-r--r--drivers/net/spider_net.c2
-rw-r--r--drivers/net/starfire.c2
-rw-r--r--drivers/net/stmmac/Kconfig8
-rw-r--r--drivers/net/stmmac/Makefile5
-rw-r--r--drivers/net/stmmac/common.h277
-rw-r--r--drivers/net/stmmac/descs.h4
-rw-r--r--drivers/net/stmmac/dwmac100.c (renamed from drivers/net/stmmac/mac100.c)202
-rw-r--r--drivers/net/stmmac/dwmac100.h (renamed from drivers/net/stmmac/mac100.h)0
-rw-r--r--drivers/net/stmmac/dwmac1000.h (renamed from drivers/net/stmmac/gmac.h)18
-rw-r--r--drivers/net/stmmac/dwmac1000_core.c245
-rw-r--r--drivers/net/stmmac/dwmac1000_dma.c (renamed from drivers/net/stmmac/gmac.c)347
-rw-r--r--drivers/net/stmmac/dwmac_dma.h107
-rw-r--r--drivers/net/stmmac/dwmac_lib.c263
-rw-r--r--drivers/net/stmmac/stmmac.h28
-rw-r--r--drivers/net/stmmac/stmmac_ethtool.c9
-rw-r--r--drivers/net/stmmac/stmmac_main.c436
-rw-r--r--drivers/net/stmmac/stmmac_mdio.c11
-rw-r--r--drivers/net/sundance.c2
-rw-r--r--drivers/net/sungem.c2
-rw-r--r--drivers/net/sunhme.c2
-rw-r--r--drivers/net/sunvnet.c5
-rw-r--r--drivers/net/tc35815.c2
-rw-r--r--drivers/net/tehuti.c2
-rw-r--r--drivers/net/tg3.c92
-rw-r--r--drivers/net/tg3.h25
-rw-r--r--drivers/net/tlan.c30
-rw-r--r--drivers/net/tlan.h3
-rw-r--r--drivers/net/tokenring/3c359.c2
-rw-r--r--drivers/net/tokenring/abyss.c2
-rw-r--r--drivers/net/tokenring/lanstreamer.c2
-rw-r--r--drivers/net/tokenring/olympic.c2
-rw-r--r--drivers/net/tokenring/tmspci.c2
-rw-r--r--drivers/net/tulip/21142.c76
-rw-r--r--drivers/net/tulip/de2104x.c140
-rw-r--r--drivers/net/tulip/dmfe.c69
-rw-r--r--drivers/net/tulip/eeprom.c47
-rw-r--r--drivers/net/tulip/interrupt.c100
-rw-r--r--drivers/net/tulip/media.c74
-rw-r--r--drivers/net/tulip/pnic.c33
-rw-r--r--drivers/net/tulip/pnic2.c59
-rw-r--r--drivers/net/tulip/timer.c52
-rw-r--r--drivers/net/tulip/tulip_core.c164
-rw-r--r--drivers/net/tulip/uli526x.c48
-rw-r--r--drivers/net/tulip/winbond-840.c179
-rw-r--r--drivers/net/tulip/xircom_cb.c46
-rw-r--r--drivers/net/tun.c101
-rw-r--r--drivers/net/typhoon.c2
-rw-r--r--drivers/net/ucc_geth.c23
-rw-r--r--drivers/net/usb/catc.c6
-rw-r--r--drivers/net/usb/mcs7830.c246
-rw-r--r--drivers/net/usb/rtl8150.c7
-rw-r--r--drivers/net/via-rhine.c2
-rw-r--r--drivers/net/via-velocity.c8
-rw-r--r--drivers/net/virtio_net.c439
-rw-r--r--drivers/net/vmxnet3/vmxnet3_drv.c2
-rw-r--r--drivers/net/vxge/vxge-main.c8
-rw-r--r--drivers/net/wan/dscc4.c2
-rw-r--r--drivers/net/wan/farsync.c2
-rw-r--r--drivers/net/wan/lmc/lmc_main.c2
-rw-r--r--drivers/net/wan/pc300_drv.c2
-rw-r--r--drivers/net/wan/pc300too.c2
-rw-r--r--drivers/net/wan/pci200syn.c2
-rw-r--r--drivers/net/wan/wanxl.c2
-rw-r--r--drivers/net/wimax/i2400m/driver.c17
-rw-r--r--drivers/net/wimax/i2400m/fw.c11
-rw-r--r--drivers/net/wireless/adm8211.c14
-rw-r--r--drivers/net/wireless/airo.c2
-rw-r--r--drivers/net/wireless/at76c50x-usb.c6
-rw-r--r--drivers/net/wireless/ath/ar9170/ar9170.h9
-rw-r--r--drivers/net/wireless/ath/ar9170/hw.h1
-rw-r--r--drivers/net/wireless/ath/ar9170/mac.c2
-rw-r--r--drivers/net/wireless/ath/ar9170/main.c110
-rw-r--r--drivers/net/wireless/ath/ar9170/usb.c2
-rw-r--r--drivers/net/wireless/ath/ath5k/ath5k.h24
-rw-r--r--drivers/net/wireless/ath/ath5k/base.c42
-rw-r--r--drivers/net/wireless/ath/ath5k/pcu.c121
-rw-r--r--drivers/net/wireless/ath/ath5k/qcu.c20
-rw-r--r--drivers/net/wireless/ath/ath5k/reset.c9
-rw-r--r--drivers/net/wireless/ath/ath9k/Makefile2
-rw-r--r--drivers/net/wireless/ath/ath9k/ahb.c23
-rw-r--r--drivers/net/wireless/ath/ath9k/ath9k.h71
-rw-r--r--drivers/net/wireless/ath/ath9k/beacon.c3
-rw-r--r--drivers/net/wireless/ath/ath9k/debug.c156
-rw-r--r--drivers/net/wireless/ath/ath9k/debug.h32
-rw-r--r--drivers/net/wireless/ath/ath9k/gpio.c428
-rw-r--r--drivers/net/wireless/ath/ath9k/hw.c150
-rw-r--r--drivers/net/wireless/ath/ath9k/hw.h9
-rw-r--r--drivers/net/wireless/ath/ath9k/init.c861
-rw-r--r--drivers/net/wireless/ath/ath9k/mac.h34
-rw-r--r--drivers/net/wireless/ath/ath9k/main.c1375
-rw-r--r--drivers/net/wireless/ath/ath9k/pci.c60
-rw-r--r--drivers/net/wireless/ath/ath9k/rc.h4
-rw-r--r--drivers/net/wireless/ath/ath9k/recv.c38
-rw-r--r--drivers/net/wireless/ath/ath9k/virtual.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/xmit.c18
-rw-r--r--drivers/net/wireless/atmel_pci.c2
-rw-r--r--drivers/net/wireless/b43/Kconfig17
-rw-r--r--drivers/net/wireless/b43/Makefile2
-rw-r--r--drivers/net/wireless/b43/b43.h21
-rw-r--r--drivers/net/wireless/b43/dma.c2
-rw-r--r--drivers/net/wireless/b43/main.c43
-rw-r--r--drivers/net/wireless/b43/phy_lp.c24
-rw-r--r--drivers/net/wireless/b43/phy_n.c1795
-rw-r--r--drivers/net/wireless/b43/phy_n.h87
-rw-r--r--drivers/net/wireless/b43/pio.h40
-rw-r--r--drivers/net/wireless/b43/tables_nphy.c577
-rw-r--r--drivers/net/wireless/b43/tables_nphy.h71
-rw-r--r--drivers/net/wireless/b43legacy/main.c35
-rw-r--r--drivers/net/wireless/hostap/hostap_hw.c9
-rw-r--r--drivers/net/wireless/hostap/hostap_pci.c2
-rw-r--r--drivers/net/wireless/hostap/hostap_plx.c2
-rw-r--r--drivers/net/wireless/ipw2x00/ipw2100.c2
-rw-r--r--drivers/net/wireless/ipw2x00/ipw2200.c2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-1000.c4
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-3945.c4
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-3945.h5
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-4965.c3
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-5000.c18
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-6000.c8
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn.c256
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-calib.c7
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-commands.h9
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-core.c148
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-core.h18
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-debug.h2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-debugfs.c154
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-dev.h30
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-devtrace.c2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-devtrace.h44
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-hcmd.c1
-rw-r--r--drivers/net/wireless/iwlwifi/iwl3945-base.c101
-rw-r--r--drivers/net/wireless/iwmc3200wifi/iwm.h2
-rw-r--r--drivers/net/wireless/iwmc3200wifi/rx.c71
-rw-r--r--drivers/net/wireless/libertas/Kconfig6
-rw-r--r--drivers/net/wireless/libertas/Makefile2
-rw-r--r--drivers/net/wireless/libertas/assoc.c17
-rw-r--r--drivers/net/wireless/libertas/cmd.c22
-rw-r--r--drivers/net/wireless/libertas/cmd.h12
-rw-r--r--drivers/net/wireless/libertas/cmdresp.c16
-rw-r--r--drivers/net/wireless/libertas/defs.h7
-rw-r--r--drivers/net/wireless/libertas/dev.h7
-rw-r--r--drivers/net/wireless/libertas/ethtool.c2
-rw-r--r--drivers/net/wireless/libertas/main.c42
-rw-r--r--drivers/net/wireless/libertas/mesh.c29
-rw-r--r--drivers/net/wireless/libertas/mesh.h32
-rw-r--r--drivers/net/wireless/libertas/scan.c2
-rw-r--r--drivers/net/wireless/libertas/tx.c2
-rw-r--r--drivers/net/wireless/libertas/wext.c26
-rw-r--r--drivers/net/wireless/libertas_tf/main.c10
-rw-r--r--drivers/net/wireless/mac80211_hwsim.c114
-rw-r--r--drivers/net/wireless/mwl8k.c2111
-rw-r--r--drivers/net/wireless/orinoco/orinoco_nortel.c2
-rw-r--r--drivers/net/wireless/orinoco/orinoco_pci.c2
-rw-r--r--drivers/net/wireless/orinoco/orinoco_plx.c2
-rw-r--r--drivers/net/wireless/orinoco/orinoco_tmd.c2
-rw-r--r--drivers/net/wireless/p54/main.c12
-rw-r--r--drivers/net/wireless/p54/p54pci.c2
-rw-r--r--drivers/net/wireless/prism54/islpci_hotplug.c2
-rw-r--r--drivers/net/wireless/rndis_wlan.c42
-rw-r--r--drivers/net/wireless/rt2x00/Kconfig4
-rw-r--r--drivers/net/wireless/rt2x00/rt2400pci.c7
-rw-r--r--drivers/net/wireless/rt2x00/rt2500pci.c41
-rw-r--r--drivers/net/wireless/rt2x00/rt2500usb.c38
-rw-r--r--drivers/net/wireless/rt2x00/rt2800lib.c117
-rw-r--r--drivers/net/wireless/rt2x00/rt2800lib.h3
-rw-r--r--drivers/net/wireless/rt2x00/rt2800pci.c57
-rw-r--r--drivers/net/wireless/rt2x00/rt2800usb.c71
-rw-r--r--drivers/net/wireless/rt2x00/rt2800usb.h90
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00.h34
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00dev.c10
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00mac.c26
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00pci.c3
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00queue.c79
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00queue.h5
-rw-r--r--drivers/net/wireless/rt2x00/rt61pci.c31
-rw-r--r--drivers/net/wireless/rt2x00/rt73usb.c37
-rw-r--r--drivers/net/wireless/rtl818x/rtl8180.h1
-rw-r--r--drivers/net/wireless/rtl818x/rtl8180_dev.c28
-rw-r--r--drivers/net/wireless/rtl818x/rtl8187.h2
-rw-r--r--drivers/net/wireless/rtl818x/rtl8187_dev.c15
-rw-r--r--drivers/net/wireless/rtl818x/rtl8187_leds.c4
-rw-r--r--drivers/net/wireless/wl12xx/wl1251.h1
-rw-r--r--drivers/net/wireless/wl12xx/wl1251_acx.c69
-rw-r--r--drivers/net/wireless/wl12xx/wl1251_acx.h87
-rw-r--r--drivers/net/wireless/wl12xx/wl1251_cmd.c83
-rw-r--r--drivers/net/wireless/wl12xx/wl1251_cmd.h22
-rw-r--r--drivers/net/wireless/wl12xx/wl1251_debugfs.c23
-rw-r--r--drivers/net/wireless/wl12xx/wl1251_init.c5
-rw-r--r--drivers/net/wireless/wl12xx/wl1251_init.h47
-rw-r--r--drivers/net/wireless/wl12xx/wl1251_main.c351
-rw-r--r--drivers/net/wireless/wl12xx/wl1251_ps.c9
-rw-r--r--drivers/net/wireless/wl12xx/wl1251_rx.c2
-rw-r--r--drivers/net/wireless/wl12xx/wl1251_tx.c9
-rw-r--r--drivers/net/wireless/wl12xx/wl1251_tx.h17
-rw-r--r--drivers/net/wireless/wl12xx/wl1271.h35
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_acx.c134
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_acx.h37
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_boot.c8
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_cmd.c67
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_cmd.h33
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_conf.h100
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_debugfs.c62
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_event.c20
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_event.h2
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_init.c12
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_main.c492
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_ps.c15
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_reg.h99
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_spi.c3
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_tx.c43
-rw-r--r--drivers/net/wireless/zd1211rw/zd_mac.c10
-rw-r--r--drivers/net/wireless/zd1211rw/zd_usb.c14
-rw-r--r--drivers/net/xilinx_emaclite.c6
-rw-r--r--drivers/net/yellowfin.c2
-rw-r--r--drivers/s390/net/qeth_core.h5
-rw-r--r--drivers/s390/net/qeth_core_main.c169
-rw-r--r--drivers/s390/net/qeth_core_mpc.h44
-rw-r--r--drivers/s390/net/qeth_core_sys.c14
-rw-r--r--drivers/s390/net/qeth_l2_main.c30
-rw-r--r--drivers/s390/net/qeth_l3.h2
-rw-r--r--drivers/s390/net/qeth_l3_main.c176
-rw-r--r--drivers/s390/net/qeth_l3_sys.c56
-rw-r--r--drivers/staging/rtl8187se/ieee80211/ieee80211.h9
-rw-r--r--drivers/staging/rtl8187se/ieee80211/ieee80211_softmac.c24
-rw-r--r--drivers/staging/rtl8187se/r8180_core.c4
-rw-r--r--drivers/staging/wlags49_h2/wl_netdev.c6
-rw-r--r--drivers/vhost/Kconfig11
-rw-r--r--drivers/vhost/Makefile2
-rw-r--r--drivers/vhost/net.c661
-rw-r--r--drivers/vhost/vhost.c1098
-rw-r--r--drivers/vhost/vhost.h161
-rw-r--r--drivers/virtio/virtio_ring.c25
-rw-r--r--include/linux/Kbuild1
-rw-r--r--include/linux/can/dev.h16
-rw-r--r--include/linux/can/netlink.h1
-rw-r--r--include/linux/ieee80211.h106
-rw-r--r--include/linux/if_tun.h14
-rw-r--r--include/linux/igmp.h2
-rw-r--r--include/linux/in.h2
-rw-r--r--include/linux/inetdevice.h1
-rw-r--r--include/linux/isdn/capilli.h3
-rw-r--r--include/linux/llc.h7
-rw-r--r--include/linux/miscdevice.h1
-rw-r--r--include/linux/netdevice.h16
-rw-r--r--include/linux/netpoll.h11
-rw-r--r--include/linux/nl80211.h94
-rw-r--r--include/linux/rtnetlink.h2
-rw-r--r--include/linux/stmmac.h53
-rw-r--r--include/linux/sysctl.h1
-rw-r--r--include/linux/vhost.h130
-rw-r--r--include/linux/virtio.h4
-rw-r--r--include/net/cfg80211.h112
-rw-r--r--include/net/dst.h2
-rw-r--r--include/net/icmp.h2
-rw-r--r--include/net/inet_sock.h4
-rw-r--r--include/net/llc.h39
-rw-r--r--include/net/llc_conn.h2
-rw-r--r--include/net/mac80211.h313
-rw-r--r--include/net/phonet/pep.h3
-rw-r--r--include/net/pkt_sched.h1
-rw-r--r--include/net/request_sock.h2
-rw-r--r--include/net/sch_generic.h19
-rw-r--r--include/net/snmp.h2
-rw-r--r--include/net/tcp.h21
-rw-r--r--include/net/xfrm.h6
-rw-r--r--lib/vsprintf.c48
-rw-r--r--mm/mmu_context.c3
-rw-r--r--net/8021q/vlan.c4
-rw-r--r--net/8021q/vlan_core.c2
-rw-r--r--net/8021q/vlan_dev.c7
-rw-r--r--net/8021q/vlanproc.c2
-rw-r--r--net/atm/addr.c2
-rw-r--r--net/atm/atm_misc.c40
-rw-r--r--net/atm/atm_sysfs.c27
-rw-r--r--net/atm/br2684.c90
-rw-r--r--net/atm/clip.c86
-rw-r--r--net/atm/common.c386
-rw-r--r--net/atm/ioctl.c196
-rw-r--r--net/atm/lec.c599
-rw-r--r--net/atm/mpc.c540
-rw-r--r--net/atm/mpoa_caches.c190
-rw-r--r--net/atm/mpoa_proc.c89
-rw-r--r--net/atm/pppoatm.c28
-rw-r--r--net/atm/proc.c81
-rw-r--r--net/atm/pvc.c43
-rw-r--r--net/atm/raw.c26
-rw-r--r--net/atm/resources.c402
-rw-r--r--net/atm/signaling.c219
-rw-r--r--net/atm/svc.c258
-rw-r--r--net/bluetooth/cmtp/capi.c37
-rw-r--r--net/bridge/br_if.c2
-rw-r--r--net/can/af_can.c124
-rw-r--r--net/can/af_can.h4
-rw-r--r--net/can/proc.c93
-rw-r--r--net/core/dev.c65
-rw-r--r--net/core/fib_rules.c2
-rw-r--r--net/core/neighbour.c11
-rw-r--r--net/core/netpoll.c169
-rw-r--r--net/core/rtnetlink.c4
-rw-r--r--net/core/sock.c6
-rw-r--r--net/dccp/ipv4.c4
-rw-r--r--net/dccp/ipv6.c4
-rw-r--r--net/ethernet/eth.c6
-rw-r--r--net/ipv4/arp.c55
-rw-r--r--net/ipv4/devinet.c1
-rw-r--r--net/ipv4/fib_frontend.c4
-rw-r--r--net/ipv4/fib_semantics.c76
-rw-r--r--net/ipv4/icmp.c2
-rw-r--r--net/ipv4/igmp.c87
-rw-r--r--net/ipv4/inet_connection_sock.c2
-rw-r--r--net/ipv4/ip_fragment.c39
-rw-r--r--net/ipv4/ip_gre.c4
-rw-r--r--net/ipv4/ip_sockglue.c14
-rw-r--r--net/ipv4/ipcomp.c9
-rw-r--r--net/ipv4/ipip.c7
-rw-r--r--net/ipv4/proc.c4
-rw-r--r--net/ipv4/route.c7
-rw-r--r--net/ipv4/syncookies.c3
-rw-r--r--net/ipv4/tcp.c30
-rw-r--r--net/ipv4/tcp_ipv4.c25
-rw-r--r--net/ipv4/tcp_output.c22
-rw-r--r--net/ipv4/tcp_timer.c6
-rw-r--r--net/ipv4/udp.c4
-rw-r--r--net/ipv4/udplite.c4
-rw-r--r--net/ipv6/addrconf.c8
-rw-r--r--net/ipv6/af_inet6.c4
-rw-r--r--net/ipv6/anycast.c2
-rw-r--r--net/ipv6/fib6_rules.c4
-rw-r--r--net/ipv6/icmp.c2
-rw-r--r--net/ipv6/ip6_fib.c6
-rw-r--r--net/ipv6/ip6_flowlabel.c9
-rw-r--r--net/ipv6/ip6_tunnel.c9
-rw-r--r--net/ipv6/ipcomp6.c13
-rw-r--r--net/ipv6/mcast.c13
-rw-r--r--net/ipv6/ndisc.c4
-rw-r--r--net/ipv6/proc.c18
-rw-r--r--net/ipv6/raw.c4
-rw-r--r--net/ipv6/reassembly.c8
-rw-r--r--net/ipv6/route.c6
-rw-r--r--net/ipv6/sit.c9
-rw-r--r--net/ipv6/syncookies.c3
-rw-r--r--net/ipv6/sysctl_net_ipv6.c4
-rw-r--r--net/ipv6/tcp_ipv6.c22
-rw-r--r--net/ipv6/udp.c2
-rw-r--r--net/ipv6/udplite.c4
-rw-r--r--net/ipv6/xfrm6_tunnel.c156
-rw-r--r--net/irda/ircomm/ircomm_tty.c6
-rw-r--r--net/key/af_key.c15
-rw-r--r--net/llc/af_llc.c64
-rw-r--r--net/llc/llc_conn.c143
-rw-r--r--net/llc/llc_core.c53
-rw-r--r--net/llc/llc_output.c45
-rw-r--r--net/llc/llc_proc.c69
-rw-r--r--net/llc/llc_sap.c111
-rw-r--r--net/mac80211/Kconfig12
-rw-r--r--net/mac80211/Makefile4
-rw-r--r--net/mac80211/agg-rx.c15
-rw-r--r--net/mac80211/agg-tx.c30
-rw-r--r--net/mac80211/cfg.c163
-rw-r--r--net/mac80211/debugfs.c94
-rw-r--r--net/mac80211/debugfs_key.c2
-rw-r--r--net/mac80211/debugfs_netdev.c212
-rw-r--r--net/mac80211/debugfs_netdev.h9
-rw-r--r--net/mac80211/debugfs_sta.c64
-rw-r--r--net/mac80211/driver-ops.h115
-rw-r--r--net/mac80211/driver-trace.h86
-rw-r--r--net/mac80211/ht.c53
-rw-r--r--net/mac80211/ibss.c81
-rw-r--r--net/mac80211/ieee80211_i.h191
-rw-r--r--net/mac80211/iface.c102
-rw-r--r--net/mac80211/key.c10
-rw-r--r--net/mac80211/key.h8
-rw-r--r--net/mac80211/main.c68
-rw-r--r--net/mac80211/mesh.c6
-rw-r--r--net/mac80211/mesh_hwmp.c20
-rw-r--r--net/mac80211/mesh_pathtbl.c6
-rw-r--r--net/mac80211/mesh_plink.c6
-rw-r--r--net/mac80211/mlme.c1203
-rw-r--r--net/mac80211/offchannel.c170
-rw-r--r--net/mac80211/pm.c10
-rw-r--r--net/mac80211/rate.c88
-rw-r--r--net/mac80211/rate.h5
-rw-r--r--net/mac80211/rx.c314
-rw-r--r--net/mac80211/scan.c230
-rw-r--r--net/mac80211/spectmgmt.c4
-rw-r--r--net/mac80211/sta_info.c60
-rw-r--r--net/mac80211/sta_info.h32
-rw-r--r--net/mac80211/status.c48
-rw-r--r--net/mac80211/tkip.c38
-rw-r--r--net/mac80211/tx.c317
-rw-r--r--net/mac80211/util.c297
-rw-r--r--net/mac80211/wme.c2
-rw-r--r--net/mac80211/work.c1098
-rw-r--r--net/netlink/genetlink.c4
-rw-r--r--net/packet/af_packet.c4
-rw-r--r--net/phonet/datagram.c6
-rw-r--r--net/phonet/pep-gprs.c4
-rw-r--r--net/phonet/pep.c29
-rw-r--r--net/phonet/pn_dev.c4
-rw-r--r--net/sched/sch_api.c1
-rw-r--r--net/sched/sch_fifo.c34
-rw-r--r--net/sctp/proc.c2
-rw-r--r--net/sctp/socket.c2
-rw-r--r--net/sysctl_net.c4
-rw-r--r--net/tipc/Kconfig75
-rw-r--r--net/tipc/core.c10
-rw-r--r--net/unix/af_unix.c4
-rw-r--r--net/unix/sysctl_net_unix.c2
-rw-r--r--net/wireless/.gitignore1
-rw-r--r--net/wireless/Kconfig13
-rw-r--r--net/wireless/Makefile6
-rw-r--r--net/wireless/chan.c41
-rw-r--r--net/wireless/core.c1
-rw-r--r--net/wireless/core.h8
-rw-r--r--net/wireless/db.txt17
-rw-r--r--net/wireless/genregdb.awk118
-rw-r--r--net/wireless/mlme.c48
-rw-r--r--net/wireless/nl80211.c412
-rw-r--r--net/wireless/nl80211.h15
-rw-r--r--net/wireless/reg.c526
-rw-r--r--net/wireless/reg.h11
-rw-r--r--net/wireless/regdb.h7
-rw-r--r--net/wireless/scan.c120
-rw-r--r--net/wireless/sme.c1
-rw-r--r--net/wireless/util.c132
-rw-r--r--net/wireless/wext-compat.c39
-rw-r--r--net/wireless/wext-proc.c4
-rw-r--r--net/xfrm/xfrm_proc.c2
-rw-r--r--net/xfrm/xfrm_sysctl.c4
653 files changed, 44533 insertions, 12562 deletions
diff --git a/Documentation/DocBook/mac80211.tmpl b/Documentation/DocBook/mac80211.tmpl
index f3f37f141db..971d1c0c83e 100644
--- a/Documentation/DocBook/mac80211.tmpl
+++ b/Documentation/DocBook/mac80211.tmpl
@@ -144,7 +144,7 @@ usage should require reading the full document.
 this though and the recommendation to allow only a single
 interface in STA mode at first!
 </para>
-!Finclude/net/mac80211.h ieee80211_if_init_conf
+!Finclude/net/mac80211.h ieee80211_vif
 </chapter>

 <chapter id="rx-tx">
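The hunk above switches the DocBook reference from the removed ieee80211_if_init_conf to struct ieee80211_vif, which drivers now receive directly in their interface callbacks. A minimal, hypothetical sketch of what such a callback looks like under that convention (the function name and the STA-only check are illustrative, not part of this patch):

```c
#include <linux/errno.h>
#include <net/mac80211.h>

/* Hypothetical add_interface callback: after the ieee80211_if_init_conf
 * removal, the vif is passed straight to the driver. */
static int example_add_interface(struct ieee80211_hw *hw,
				 struct ieee80211_vif *vif)
{
	/* Honour the recommendation quoted above: allow only a single
	 * station-mode interface at first. */
	if (vif->type != NL80211_IFTYPE_STATION)
		return -EOPNOTSUPP;

	return 0;
}
```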
diff --git a/Documentation/feature-removal-schedule.txt b/Documentation/feature-removal-schedule.txt
index 0a46833c1b7..2f93ac06c41 100644
--- a/Documentation/feature-removal-schedule.txt
+++ b/Documentation/feature-removal-schedule.txt
@@ -88,27 +88,6 @@ Who: Luis R. Rodriguez <lrodriguez@atheros.com>

 ---------------------------

-What: CONFIG_WIRELESS_OLD_REGULATORY - old static regulatory information
-When: March 2010 / desktop catchup
-
-Why: The old regulatory infrastructure has been replaced with a new one
- which does not require statically defined regulatory domains. We do
- not want to keep static regulatory domains in the kernel due to the
- the dynamic nature of regulatory law and localization. We kept around
- the old static definitions for the regulatory domains of:
-
- * US
- * JP
- * EU
-
- and used by default the US when CONFIG_WIRELESS_OLD_REGULATORY was
- set. We will remove this option once the standard Linux desktop catches
- up with the new userspace APIs we have implemented.
-
-Who: Luis R. Rodriguez <lrodriguez@atheros.com>
-
----------------------------
-
 What: dev->power.power_state
 When: July 2007
 Why: Broken design for runtime control over driver power states, confusing
diff --git a/Documentation/isdn/INTERFACE.CAPI b/Documentation/isdn/INTERFACE.CAPI
index 5fe8de5cc72..f172091fb7c 100644
--- a/Documentation/isdn/INTERFACE.CAPI
+++ b/Documentation/isdn/INTERFACE.CAPI
@@ -149,10 +149,11 @@ char *(*procinfo)(struct capi_ctr *ctrlr)
  pointer to a callback function returning the entry for the device in
  the CAPI controller info table, /proc/capi/controller

-read_proc_t *ctr_read_proc
- pointer to the read_proc callback function for the device's proc file
- system entry, /proc/capi/controllers/<n>; will be called with a
- pointer to the device's capi_ctr structure as the last (data) argument
+const struct file_operations *proc_fops
+ pointers to callback functions for the device's proc file
+ system entry, /proc/capi/controllers/<n>; pointer to the device's
+ capi_ctr structure is available from struct proc_dir_entry::data
+ which is available from struct inode.

 Note: Callback functions except send_message() are never called in interrupt
 context.
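The driver-side conversions later in this patch (capi.c, the AVM and Eicon drivers, hysdn) all follow the same seq_file shape implied by the new proc_fops field. A minimal sketch of that shape, with hypothetical names, assuming the capi_ctr pointer was stored in proc_dir_entry::data as the text above describes:

```c
#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/isdn/capilli.h>

/* Hypothetical show routine: m->private carries the capi_ctr that was
 * handed to single_open() below. */
static int example_ctr_proc_show(struct seq_file *m, void *v)
{
	struct capi_ctr *ctrl = m->private;

	seq_printf(m, "%-16s %s\n", "name", ctrl->name);
	return 0;
}

static int example_ctr_proc_open(struct inode *inode, struct file *file)
{
	/* The capi_ctr is available from the proc entry's data pointer. */
	return single_open(file, example_ctr_proc_show, PDE(inode)->data);
}

static const struct file_operations example_ctr_proc_fops = {
	.owner		= THIS_MODULE,
	.open		= example_ctr_proc_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
```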
diff --git a/Documentation/networking/ip-sysctl.txt b/Documentation/networking/ip-sysctl.txt
index 006b39dec87..2dc7a1d9768 100644
--- a/Documentation/networking/ip-sysctl.txt
+++ b/Documentation/networking/ip-sysctl.txt
@@ -692,6 +692,25 @@ proxy_arp - BOOLEAN
  conf/{all,interface}/proxy_arp is set to TRUE,
  it will be disabled otherwise

+proxy_arp_pvlan - BOOLEAN
+ Private VLAN proxy arp.
+ Basically allow proxy arp replies back to the same interface
+ (from which the ARP request/solicitation was received).
+
+ This is done to support (ethernet) switch features, like RFC
+ 3069, where the individual ports are NOT allowed to
+ communicate with each other, but they are allowed to talk to
+ the upstream router. As described in RFC 3069, it is possible
+ to allow these hosts to communicate through the upstream
+ router by proxy_arp'ing. Don't need to be used together with
+ proxy_arp.
+
+ This technology is known by different names:
+ In RFC 3069 it is called VLAN Aggregation.
+ Cisco and Allied Telesyn call it Private VLAN.
+ Hewlett-Packard call it Source-Port filtering or port-isolation.
+ Ericsson call it MAC-Forced Forwarding (RFC Draft).
+
 shared_media - BOOLEAN
  Send(router) or accept(host) RFC1620 shared media redirects.
  Overrides ip_secure_redirects.
@@ -833,9 +852,18 @@ arp_notify - BOOLEAN
  or hardware address changes.

 arp_accept - BOOLEAN
- Define behavior when gratuitous arp replies are received:
- 0 - drop gratuitous arp frames
- 1 - accept gratuitous arp frames
+ Define behavior for gratuitous ARP frames who's IP is not
+ already present in the ARP table:
+ 0 - don't create new entries in the ARP table
+ 1 - create new entries in the ARP table
+
+ Both replies and requests type gratuitous arp will trigger the
+ ARP table to be updated, if this setting is on.
+
+ If the ARP table already contains the IP address of the
+ gratuitous arp frame, the arp table will be updated regardless
+ if this setting is on or off.
+

 app_solicit - INTEGER
  The maximum number of probes to send to the user space ARP daemon
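The two knobs documented above are per-interface conf sysctls. A tiny userspace sketch of flipping one of them, assuming a hypothetical interface named eth0 (any interface name, or "all", works the same way):

```c
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

/* Equivalent to: echo 1 > /proc/sys/net/ipv4/conf/eth0/proxy_arp_pvlan */
int main(void)
{
	const char *path = "/proc/sys/net/ipv4/conf/eth0/proxy_arp_pvlan";
	int fd = open(path, O_WRONLY);

	if (fd < 0) {
		perror(path);
		return 1;
	}
	if (write(fd, "1", 1) != 1)
		perror("write");
	close(fd);
	return 0;
}
```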
diff --git a/Documentation/networking/ixgbevf.txt b/Documentation/networking/ixgbevf.txt
new file mode 100755
index 00000000000..19015de6725
--- /dev/null
+++ b/Documentation/networking/ixgbevf.txt
@@ -0,0 +1,90 @@
+Linux* Base Driver for Intel(R) Network Connection
+==================================================
+
+November 24, 2009
+
+Contents
+========
+
+- In This Release
+- Identifying Your Adapter
+- Known Issues/Troubleshooting
+- Support
+
+In This Release
+===============
+
+This file describes the ixgbevf Linux* Base Driver for Intel Network
+Connection.
+
+The ixgbevf driver supports 82599-based virtual function devices that can only
+be activated on kernels with CONFIG_PCI_IOV enabled.
+
+The ixgbevf driver supports virtual functions generated by the ixgbe driver
+with a max_vfs value of 1 or greater.
+
+The guest OS loading the ixgbevf driver must support MSI-X interrupts.
+
+VLANs: There is a limit of a total of 32 shared VLANs to 1 or more VFs.
+
+Identifying Your Adapter
+========================
+
+For more information on how to identify your adapter, go to the Adapter &
+Driver ID Guide at:
+
+ http://support.intel.com/support/network/sb/CS-008441.htm
+
+Known Issues/Troubleshooting
+============================
+
+ Unloading Physical Function (PF) Driver Causes System Reboots When VM is
+ Running and VF is Loaded on the VM
+ ------------------------------------------------------------------------
+ Do not unload the PF driver (ixgbe) while VFs are assigned to guests.
+
+Support
+=======
+
+For general information, go to the Intel support website at:
+
+ http://support.intel.com
+
+or the Intel Wired Networking project hosted by Sourceforge at:
+
+ http://sourceforge.net/projects/e1000
+
+If an issue is identified with the released source code on the supported
+kernel with a supported adapter, email the specific information related
+to the issue to e1000-devel@lists.sf.net
+
+License
+=======
+
+Intel 10 Gigabit Linux driver.
+Copyright(c) 1999 - 2009 Intel Corporation.
+
+This program is free software; you can redistribute it and/or modify it
+under the terms and conditions of the GNU General Public License,
+version 2, as published by the Free Software Foundation.
+
+This program is distributed in the hope it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+more details.
+
+You should have received a copy of the GNU General Public License along with
+this program; if not, write to the Free Software Foundation, Inc.,
+51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+The full GNU General Public License is included in this distribution in
+the file called "COPYING".
+
+Trademarks
+==========
+
+Intel, Itanium, and Pentium are trademarks or registered trademarks of
+Intel Corporation or its subsidiaries in the United States and other
+countries.
+
+* Other names and brands may be claimed as the property of others.
diff --git a/Documentation/networking/regulatory.txt b/Documentation/networking/regulatory.txt
index ee31369e9e5..9551622d0a7 100644
--- a/Documentation/networking/regulatory.txt
+++ b/Documentation/networking/regulatory.txt
@@ -188,3 +188,27 @@ Then in some part of your code after your wiphy has been registered:
  &mydriver_jp_regdom.reg_rules[i],
  sizeof(struct ieee80211_reg_rule));
  regulatory_struct_hint(rd);
+
+Statically compiled regulatory database
+---------------------------------------
+
+In most situations the userland solution using CRDA as described
+above is the preferred solution. However in some cases a set of
+rules built into the kernel itself may be desirable. To account
+for this situation, a configuration option has been provided
+(i.e. CONFIG_CFG80211_INTERNAL_REGDB). With this option enabled,
+the wireless database information contained in net/wireless/db.txt is
+used to generate a data structure encoded in net/wireless/regdb.c.
+That option also enables code in net/wireless/reg.c which queries
+the data in regdb.c as an alternative to using CRDA.
+
+The file net/wireless/db.txt should be kept up-to-date with the db.txt
+file available in the git repository here:
+
+ git://git.kernel.org/pub/scm/linux/kernel/git/linville/wireless-regdb.git
+
+Again, most users in most situations should be using the CRDA package
+provided with their distribution, and in most other situations users
+should be building and using CRDA on their own rather than using
+this option. If you are not absolutely sure that you should be using
+CONFIG_CFG80211_INTERNAL_REGDB then _DO_NOT_USE_IT_.
diff --git a/Documentation/powerpc/dts-bindings/fsl/can.txt b/Documentation/powerpc/dts-bindings/fsl/can.txt
new file mode 100644
index 00000000000..2fa4fcd38fd
--- /dev/null
+++ b/Documentation/powerpc/dts-bindings/fsl/can.txt
@@ -0,0 +1,53 @@
+CAN Device Tree Bindings
+------------------------
+
+(c) 2006-2009 Secret Lab Technologies Ltd
+Grant Likely <grant.likely@secretlab.ca>
+
+fsl,mpc5200-mscan nodes
+-----------------------
+In addition to the required compatible-, reg- and interrupt-properties, you can
+also specify which clock source shall be used for the controller:
+
+- fsl,mscan-clock-source : a string describing the clock source. Valid values
+ are: "ip" for ip bus clock
+ "ref" for reference clock (XTAL)
+ "ref" is default in case this property is not
+ present.
+
+fsl,mpc5121-mscan nodes
+-----------------------
+In addition to the required compatible-, reg- and interrupt-properties, you can
+also specify which clock source and divider shall be used for the controller:
+
+- fsl,mscan-clock-source : a string describing the clock source. Valid values
+ are: "ip" for ip bus clock
+ "ref" for reference clock
+ "sys" for system clock
+ If this property is not present, an optimal CAN
+ clock source and frequency based on the system
+ clock will be selected. If this is not possible,
+ the reference clock will be used.
+
+- fsl,mscan-clock-divider: for the reference and system clock, an additional
+ clock divider can be specified. By default, a
+ value of 1 is used.
+
+Note that the MPC5121 Rev. 1 processor is not supported.
+
+Examples:
+ can@1300 {
+ compatible = "fsl,mpc5121-mscan";
+ interrupts = <12 0x8>;
+ interrupt-parent = <&ipic>;
+ reg = <0x1300 0x80>;
+ };
+
+ can@1380 {
+ compatible = "fsl,mpc5121-mscan";
+ interrupts = <13 0x8>;
+ interrupt-parent = <&ipic>;
+ reg = <0x1380 0x80>;
+ fsl,mscan-clock-source = "ref";
+ fsl,mscan-clock-divider = <3>;
+ };
diff --git a/Documentation/powerpc/dts-bindings/fsl/mpc5200.txt b/Documentation/powerpc/dts-bindings/fsl/mpc5200.txt
index 5c6602dbfdc..4ccb2cd5df9 100644
--- a/Documentation/powerpc/dts-bindings/fsl/mpc5200.txt
+++ b/Documentation/powerpc/dts-bindings/fsl/mpc5200.txt
@@ -195,11 +195,4 @@ External interrupts:

 fsl,mpc5200-mscan nodes
 -----------------------
-In addition to the required compatible-, reg- and interrupt-properites, you can
-also specify which clock source shall be used for the controller:
-
-- fsl,mscan-clock-source- a string describing the clock source. Valid values
- are: "ip" for ip bus clock
- "ref" for reference clock (XTAL)
- "ref" is default in case this property is not
- present.
+See file can.txt in this directory.
diff --git a/MAINTAINERS b/MAINTAINERS
index 03f38c18f32..f19e0989a5a 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -4443,6 +4443,13 @@ S: Supported
 F: Documentation/networking/LICENSE.qla3xxx
 F: drivers/net/qla3xxx.*

+QLOGIC QLCNIC (1/10)Gb ETHERNET DRIVER
+M: Amit Kumar Salecha <amit.salecha@qlogic.com>
+M: linux-driver@qlogic.com
+L: netdev@vger.kernel.org
+S: Supported
+F: drivers/net/qlcnic/
+
 QLOGIC QLGE 10Gb ETHERNET DRIVER
 M: Ron Mercer <ron.mercer@qlogic.com>
 M: linux-driver@qlogic.com
@@ -5794,6 +5801,15 @@ S: Maintained
 F: Documentation/filesystems/vfat.txt
 F: fs/fat/

+VIRTIO HOST (VHOST)
+M: "Michael S. Tsirkin" <mst@redhat.com>
+L: kvm@vger.kernel.org
+L: virtualization@lists.osdl.org
+L: netdev@vger.kernel.org
+S: Maintained
+F: drivers/vhost/
+F: include/linux/vhost.h
+
 VIA RHINE NETWORK DRIVER
 M: Roger Luethi <rl@hellgate.ch>
 S: Maintained
diff --git a/arch/ia64/kvm/Kconfig b/arch/ia64/kvm/Kconfig
index ef3e7be29ca..01c75797119 100644
--- a/arch/ia64/kvm/Kconfig
+++ b/arch/ia64/kvm/Kconfig
@@ -47,6 +47,7 @@ config KVM_INTEL
  Provides support for KVM on Itanium 2 processors equipped with the VT
  extensions.

+source drivers/vhost/Kconfig
 source drivers/virtio/Kconfig

 endif # VIRTUALIZATION
diff --git a/arch/powerpc/boot/dts/mpc8569mds.dts b/arch/powerpc/boot/dts/mpc8569mds.dts
index 795eb362fcf..8b72eaff5b0 100644
--- a/arch/powerpc/boot/dts/mpc8569mds.dts
+++ b/arch/powerpc/boot/dts/mpc8569mds.dts
@@ -535,6 +535,7 @@
  rx-clock-name = "none";
  tx-clock-name = "clk12";
  pio-handle = <&pio1>;
+ tbi-handle = <&tbi1>;
  phy-handle = <&qe_phy0>;
  phy-connection-type = "rgmii-id";
  };
@@ -579,7 +580,7 @@
  reg = <0x6>;
  device_type = "ethernet-phy";
  };
- tbi-phy@11 {
+ tbi1: tbi-phy@11 {
  reg = <0x11>;
  device_type = "tbi-phy";
  };
@@ -590,7 +591,7 @@
  reg = <0x3520 0x18>;
  compatible = "fsl,ucc-mdio";

- tbi0: tbi-phy@15 {
+ tbi6: tbi-phy@15 {
  reg = <0x15>;
  device_type = "tbi-phy";
  };
@@ -600,7 +601,7 @@
  #size-cells = <0>;
  reg = <0x3720 0x38>;
  compatible = "fsl,ucc-mdio";
- tbi1: tbi-phy@17 {
+ tbi8: tbi-phy@17 {
  reg = <0x17>;
  device_type = "tbi-phy";
  };
@@ -617,10 +618,22 @@
  rx-clock-name = "none";
  tx-clock-name = "clk12";
  pio-handle = <&pio3>;
+ tbi-handle = <&tbi3>;
  phy-handle = <&qe_phy2>;
  phy-connection-type = "rgmii-id";
  };

+ mdio@2320 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0x2320 0x18>;
+ compatible = "fsl,ucc-mdio";
+ tbi3: tbi-phy@11 {
+ reg = <0x11>;
+ device_type = "tbi-phy";
+ };
+ };
+
  enet1: ucc@3000 {
  device_type = "network";
  compatible = "ucc_geth";
@@ -632,10 +645,22 @@
  rx-clock-name = "none";
  tx-clock-name = "clk17";
  pio-handle = <&pio2>;
+ tbi-handle = <&tbi2>;
  phy-handle = <&qe_phy1>;
  phy-connection-type = "rgmii-id";
  };

+ mdio@3120 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0x3120 0x18>;
+ compatible = "fsl,ucc-mdio";
+ tbi2: tbi-phy@11 {
+ reg = <0x11>;
+ device_type = "tbi-phy";
+ };
+ };
+
  enet3: ucc@3200 {
  device_type = "network";
  compatible = "ucc_geth";
@@ -647,10 +672,22 @@
  rx-clock-name = "none";
  tx-clock-name = "clk17";
  pio-handle = <&pio4>;
+ tbi-handle = <&tbi4>;
  phy-handle = <&qe_phy3>;
  phy-connection-type = "rgmii-id";
  };

+ mdio@3320 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0x3320 0x18>;
+ compatible = "fsl,ucc-mdio";
+ tbi4: tbi-phy@11 {
+ reg = <0x11>;
+ device_type = "tbi-phy";
+ };
+ };
+
  enet5: ucc@3400 {
  device_type = "network";
  compatible = "ucc_geth";
@@ -661,7 +698,7 @@
  local-mac-address = [ 00 00 00 00 00 00 ];
  rx-clock-name = "none";
  tx-clock-name = "none";
- tbi-handle = <&tbi0>;
+ tbi-handle = <&tbi6>;
  phy-handle = <&qe_phy5>;
  phy-connection-type = "sgmii";
  };
@@ -676,7 +713,7 @@
  local-mac-address = [ 00 00 00 00 00 00 ];
  rx-clock-name = "none";
  tx-clock-name = "none";
- tbi-handle = <&tbi1>;
+ tbi-handle = <&tbi8>;
  phy-handle = <&qe_phy7>;
  phy-connection-type = "sgmii";
  };
diff --git a/arch/powerpc/kvm/Kconfig b/arch/powerpc/kvm/Kconfig
index 07703f72330..e28841fbfb8 100644
--- a/arch/powerpc/kvm/Kconfig
+++ b/arch/powerpc/kvm/Kconfig
@@ -75,6 +75,7 @@ config KVM_E500

  If unsure, say N.

+source drivers/vhost/Kconfig
 source drivers/virtio/Kconfig

 endif # VIRTUALIZATION
diff --git a/arch/powerpc/platforms/85xx/mpc85xx_mds.c b/arch/powerpc/platforms/85xx/mpc85xx_mds.c
index 21f61b8c445..04ed2156db1 100644
--- a/arch/powerpc/platforms/85xx/mpc85xx_mds.c
+++ b/arch/powerpc/platforms/85xx/mpc85xx_mds.c
@@ -237,6 +237,8 @@ static void __init mpc85xx_mds_setup_arch(void)
  } else if (machine_is(mpc8569_mds)) {
 #define BCSR7_UCC12_GETHnRST (0x1 << 2)
 #define BCSR8_UEM_MARVELL_RST (0x1 << 1)
+#define BCSR_UCC_RGMII (0x1 << 6)
+#define BCSR_UCC_RTBI (0x1 << 5)
  /*
  * U-Boot mangles interrupt polarity for Marvell PHYs,
  * so reset built-in and UEM Marvell PHYs, this puts
@@ -247,6 +249,28 @@ static void __init mpc85xx_mds_setup_arch(void)

  setbits8(&bcsr_regs[7], BCSR7_UCC12_GETHnRST);
  clrbits8(&bcsr_regs[8], BCSR8_UEM_MARVELL_RST);
+
+ for (np = NULL; (np = of_find_compatible_node(np,
+ "network",
+ "ucc_geth")) != NULL;) {
+ const unsigned int *prop;
+ int ucc_num;
+
+ prop = of_get_property(np, "cell-index", NULL);
+ if (prop == NULL)
+ continue;
+
+ ucc_num = *prop - 1;
+
+ prop = of_get_property(np, "phy-connection-type", NULL);
+ if (prop == NULL)
+ continue;
+
+ if (strcmp("rtbi", (const char *)prop) == 0)
+ clrsetbits_8(&bcsr_regs[7 + ucc_num],
+ BCSR_UCC_RGMII, BCSR_UCC_RTBI);
+ }
+
  }
  iounmap(bcsr_regs);
  }
diff --git a/arch/s390/kvm/Kconfig b/arch/s390/kvm/Kconfig
index 6ee55ae84ce..a7251580891 100644
--- a/arch/s390/kvm/Kconfig
+++ b/arch/s390/kvm/Kconfig
@@ -35,6 +35,7 @@ config KVM

 # OK, it's a little counter-intuitive to do this, but it puts it neatly under
 # the virtualization menu.
+source drivers/vhost/Kconfig
 source drivers/virtio/Kconfig

 endif # VIRTUALIZATION
diff --git a/arch/x86/kvm/Kconfig b/arch/x86/kvm/Kconfig
index 4cd49833246..3c4d0109ad2 100644
--- a/arch/x86/kvm/Kconfig
+++ b/arch/x86/kvm/Kconfig
@@ -65,6 +65,7 @@ config KVM_AMD

 # OK, it's a little counter-intuitive to do this, but it puts it neatly under
 # the virtualization menu.
+source drivers/vhost/Kconfig
 source drivers/lguest/Kconfig
 source drivers/virtio/Kconfig

diff --git a/drivers/Makefile b/drivers/Makefile
index 6ee53c7a57a..81e36596b1e 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -106,6 +106,7 @@ obj-$(CONFIG_HID) += hid/
 obj-$(CONFIG_PPC_PS3) += ps3/
 obj-$(CONFIG_OF) += of/
 obj-$(CONFIG_SSB) += ssb/
+obj-$(CONFIG_VHOST_NET) += vhost/
 obj-$(CONFIG_VIRTIO) += virtio/
 obj-$(CONFIG_VLYNQ) += vlynq/
 obj-$(CONFIG_STAGING) += staging/
diff --git a/drivers/atm/fore200e.c b/drivers/atm/fore200e.c
index bc53fed89b1..f7d6ebaa041 100644
--- a/drivers/atm/fore200e.c
+++ b/drivers/atm/fore200e.c
@@ -2064,12 +2064,10 @@ fore200e_get_esi(struct fore200e* fore200e)
  return -EBUSY;
  }

- printk(FORE200E "device %s, rev. %c, S/N: %d, ESI: %02x:%02x:%02x:%02x:%02x:%02x\n",
+ printk(FORE200E "device %s, rev. %c, S/N: %d, ESI: %pM\n",
  fore200e->name,
  (prom->hw_revision & 0xFF) + '@', /* probably meaningless with SBA boards */
- prom->serial_number & 0xFFFF,
- prom->mac_addr[ 2 ], prom->mac_addr[ 3 ], prom->mac_addr[ 4 ],
- prom->mac_addr[ 5 ], prom->mac_addr[ 6 ], prom->mac_addr[ 7 ]);
+ prom->serial_number & 0xFFFF, &prom->mac_addr[2]);

  for (i = 0; i < ESI_LEN; i++) {
  fore200e->esi[ i ] = fore200e->atm_dev->esi[ i ] = prom->mac_addr[ i + 2 ];
@@ -2845,13 +2843,12 @@ fore200e_proc_read(struct atm_dev *dev, loff_t* pos, char* page)
  " interrupt line:\t\t%s\n"
  " physical base address:\t0x%p\n"
  " virtual base address:\t0x%p\n"
- " factory address (ESI):\t%02x:%02x:%02x:%02x:%02x:%02x\n"
+ " factory address (ESI):\t%pM\n"
  " board serial number:\t\t%d\n\n",
  fore200e_irq_itoa(fore200e->irq),
  (void*)fore200e->phys_base,
  fore200e->virt_base,
- fore200e->esi[0], fore200e->esi[1], fore200e->esi[2],
- fore200e->esi[3], fore200e->esi[4], fore200e->esi[5],
+ fore200e->esi,
  fore200e->esi[4] * 256 + fore200e->esi[5]);

  return len;
diff --git a/drivers/atm/idt77252.c b/drivers/atm/idt77252.c
index e33ae0025b1..01f36c08cb5 100644
--- a/drivers/atm/idt77252.c
+++ b/drivers/atm/idt77252.c
@@ -3557,10 +3557,7 @@ init_card(struct atm_dev *dev)
  if (tmp) {
  memcpy(card->atmdev->esi, tmp->dev_addr, 6);

- printk("%s: ESI %02x:%02x:%02x:%02x:%02x:%02x\n",
- card->name, card->atmdev->esi[0], card->atmdev->esi[1],
- card->atmdev->esi[2], card->atmdev->esi[3],
- card->atmdev->esi[4], card->atmdev->esi[5]);
+ printk("%s: ESI %pM\n", card->name, card->atmdev->esi);
  }
  /*
  * XXX: </hack>
diff --git a/drivers/atm/lanai.c b/drivers/atm/lanai.c
index cf97c34cbaf..7fe7c324e7e 100644
--- a/drivers/atm/lanai.c
+++ b/drivers/atm/lanai.c
@@ -998,9 +998,7 @@ static int __devinit eeprom_validate(struct lanai_dev *lanai)
  (unsigned int) e[EEPROM_MAC_REV + i]);
  return -EIO;
  }
- DPRINTK("eeprom: MAC address = %02X:%02X:%02X:%02X:%02X:%02X\n",
- e[EEPROM_MAC + 0], e[EEPROM_MAC + 1], e[EEPROM_MAC + 2],
- e[EEPROM_MAC + 3], e[EEPROM_MAC + 4], e[EEPROM_MAC + 5]);
+ DPRINTK("eeprom: MAC address = %pM\n", &e[EEPROM_MAC]);
  /* Verify serial number */
  lanai->serialno = eeprom_be4(lanai, EEPROM_SERIAL);
  v = eeprom_be4(lanai, EEPROM_SERIAL_REV);
@@ -2483,14 +2481,8 @@ static int lanai_proc_read(struct atm_dev *atmdev, loff_t *pos, char *page)
  return sprintf(page, "revision: board=%d, pci_if=%d\n",
  lanai->board_rev, (int) lanai->pci->revision);
  if (left-- == 0)
- return sprintf(page, "EEPROM ESI: "
- "%02X:%02X:%02X:%02X:%02X:%02X\n",
- lanai->eeprom[EEPROM_MAC + 0],
- lanai->eeprom[EEPROM_MAC + 1],
- lanai->eeprom[EEPROM_MAC + 2],
- lanai->eeprom[EEPROM_MAC + 3],
- lanai->eeprom[EEPROM_MAC + 4],
- lanai->eeprom[EEPROM_MAC + 5]);
+ return sprintf(page, "EEPROM ESI: %pM\n",
+ &lanai->eeprom[EEPROM_MAC]);
  if (left-- == 0)
  return sprintf(page, "status: SOOL=%d, LOCD=%d, LED=%d, "
  "GPIN=%d\n", (lanai->status & STATUS_SOOL) ? 1 : 0,
diff --git a/drivers/atm/nicstar.c b/drivers/atm/nicstar.c
index 3da804b1627..50838407b11 100644
--- a/drivers/atm/nicstar.c
+++ b/drivers/atm/nicstar.c
@@ -807,9 +807,7 @@ static int __devinit ns_init_card(int i, struct pci_dev *pcidev)
  }
  }

- printk("nicstar%d: MAC address %02X:%02X:%02X:%02X:%02X:%02X\n", i,
- card->atmdev->esi[0], card->atmdev->esi[1], card->atmdev->esi[2],
- card->atmdev->esi[3], card->atmdev->esi[4], card->atmdev->esi[5]);
+ printk("nicstar%d: MAC address %pM\n", i, card->atmdev->esi);

  card->atmdev->dev_data = card;
  card->atmdev->ci_range.vpi_bits = card->vpibits;
diff --git a/drivers/firmware/iscsi_ibft.c b/drivers/firmware/iscsi_ibft.c
index 051d1ebbd28..5aeb3b541c8 100644
--- a/drivers/firmware/iscsi_ibft.c
+++ b/drivers/firmware/iscsi_ibft.c
@@ -380,7 +380,6 @@ static ssize_t ibft_attr_show_nic(struct ibft_kobject *entry,
  struct ibft_nic *nic = entry->nic;
  void *ibft_loc = entry->header;
  char *str = buf;
- char *mac;
  int val;

  if (!nic)
@@ -421,10 +420,7 @@ static ssize_t ibft_attr_show_nic(struct ibft_kobject *entry,
  str += sprintf(str, "%d\n", nic->vlan);
  break;
  case ibft_eth_mac:
- mac = nic->mac;
- str += sprintf(str, "%02x:%02x:%02x:%02x:%02x:%02x\n",
- (u8)mac[0], (u8)mac[1], (u8)mac[2],
- (u8)mac[3], (u8)mac[4], (u8)mac[5]);
+ str += sprintf(str, "%pM\n", nic->mac);
  break;
  case ibft_eth_hostname:
  str += sprintf_string(str, nic->hostname_len,
diff --git a/drivers/infiniband/hw/cxgb3/iwch_cm.c b/drivers/infiniband/hw/cxgb3/iwch_cm.c
index 66b41351910..d94388b81a4 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_cm.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_cm.c
@@ -1371,15 +1371,8 @@ static int pass_accept_req(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
1371 tim.mac_addr = req->dst_mac; 1371 tim.mac_addr = req->dst_mac;
1372 tim.vlan_tag = ntohs(req->vlan_tag); 1372 tim.vlan_tag = ntohs(req->vlan_tag);
1373 if (tdev->ctl(tdev, GET_IFF_FROM_MAC, &tim) < 0 || !tim.dev) { 1373 if (tdev->ctl(tdev, GET_IFF_FROM_MAC, &tim) < 0 || !tim.dev) {
1374 printk(KERN_ERR 1374 printk(KERN_ERR "%s bad dst mac %pM\n",
1375 "%s bad dst mac %02x %02x %02x %02x %02x %02x\n", 1375 __func__, req->dst_mac);
1376 __func__,
1377 req->dst_mac[0],
1378 req->dst_mac[1],
1379 req->dst_mac[2],
1380 req->dst_mac[3],
1381 req->dst_mac[4],
1382 req->dst_mac[5]);
1383 goto reject; 1376 goto reject;
1384 } 1377 }
1385 1378
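Note: the four hunks above (lanai, nicstar, iscsi_ibft, iwch_cm) all make the same substitution: the open-coded "%02X:%02X:..." argument lists become a single %pM conversion, which takes a pointer to six bytes and prints them colon-separated. A minimal call-site sketch follows; print_mac_example and addr are hypothetical names, not taken from the patch.

#include <linux/kernel.h>
#include <linux/types.h>

static void print_mac_example(const u8 *addr)
{
	/* %pM dereferences addr and emits aa:bb:cc:dd:ee:ff */
	printk(KERN_INFO "MAC address %pM\n", addr);
}
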
diff --git a/drivers/isdn/capi/capi.c b/drivers/isdn/capi/capi.c
index 65bf91e16a4..79f9364aded 100644
--- a/drivers/isdn/capi/capi.c
+++ b/drivers/isdn/capi/capi.c
@@ -33,6 +33,7 @@
33#endif /* CONFIG_ISDN_CAPI_MIDDLEWARE */ 33#endif /* CONFIG_ISDN_CAPI_MIDDLEWARE */
34#include <linux/skbuff.h> 34#include <linux/skbuff.h>
35#include <linux/proc_fs.h> 35#include <linux/proc_fs.h>
36#include <linux/seq_file.h>
36#include <linux/poll.h> 37#include <linux/poll.h>
37#include <linux/capi.h> 38#include <linux/capi.h>
38#include <linux/kernelcapi.h> 39#include <linux/kernelcapi.h>
@@ -1407,114 +1408,84 @@ static void capinc_tty_exit(void)
1407 * /proc/capi/capi20: 1408 * /proc/capi/capi20:
1408 * minor applid nrecvctlpkt nrecvdatapkt nsendctlpkt nsenddatapkt 1409 * minor applid nrecvctlpkt nrecvdatapkt nsendctlpkt nsenddatapkt
1409 */ 1410 */
1410static int proc_capidev_read_proc(char *page, char **start, off_t off, 1411static int capi20_proc_show(struct seq_file *m, void *v)
1411 int count, int *eof, void *data)
1412{ 1412{
1413 struct capidev *cdev; 1413 struct capidev *cdev;
1414 struct list_head *l; 1414 struct list_head *l;
1415 int len = 0;
1416 1415
1417 read_lock(&capidev_list_lock); 1416 read_lock(&capidev_list_lock);
1418 list_for_each(l, &capidev_list) { 1417 list_for_each(l, &capidev_list) {
1419 cdev = list_entry(l, struct capidev, list); 1418 cdev = list_entry(l, struct capidev, list);
1420 len += sprintf(page+len, "0 %d %lu %lu %lu %lu\n", 1419 seq_printf(m, "0 %d %lu %lu %lu %lu\n",
1421 cdev->ap.applid, 1420 cdev->ap.applid,
1422 cdev->ap.nrecvctlpkt, 1421 cdev->ap.nrecvctlpkt,
1423 cdev->ap.nrecvdatapkt, 1422 cdev->ap.nrecvdatapkt,
1424 cdev->ap.nsentctlpkt, 1423 cdev->ap.nsentctlpkt,
1425 cdev->ap.nsentdatapkt); 1424 cdev->ap.nsentdatapkt);
1426 if (len <= off) {
1427 off -= len;
1428 len = 0;
1429 } else {
1430 if (len-off > count)
1431 goto endloop;
1432 }
1433 } 1425 }
1434
1435endloop:
1436 read_unlock(&capidev_list_lock); 1426 read_unlock(&capidev_list_lock);
1437 if (len < count) 1427 return 0;
1438 *eof = 1;
1439 if (len > count) len = count;
1440 if (len < 0) len = 0;
1441 return len;
1442} 1428}
1443 1429
1430static int capi20_proc_open(struct inode *inode, struct file *file)
1431{
1432 return single_open(file, capi20_proc_show, NULL);
1433}
1434
1435static const struct file_operations capi20_proc_fops = {
1436 .owner = THIS_MODULE,
1437 .open = capi20_proc_open,
1438 .read = seq_read,
1439 .llseek = seq_lseek,
1440 .release = single_release,
1441};
1442
1444/* 1443/*
1445 * /proc/capi/capi20ncci: 1444 * /proc/capi/capi20ncci:
1446 * applid ncci 1445 * applid ncci
1447 */ 1446 */
1448static int proc_capincci_read_proc(char *page, char **start, off_t off, 1447static int capi20ncci_proc_show(struct seq_file *m, void *v)
1449 int count, int *eof, void *data)
1450{ 1448{
1451 struct capidev *cdev; 1449 struct capidev *cdev;
1452 struct capincci *np; 1450 struct capincci *np;
1453 struct list_head *l; 1451 struct list_head *l;
1454 int len = 0;
1455 1452
1456 read_lock(&capidev_list_lock); 1453 read_lock(&capidev_list_lock);
1457 list_for_each(l, &capidev_list) { 1454 list_for_each(l, &capidev_list) {
1458 cdev = list_entry(l, struct capidev, list); 1455 cdev = list_entry(l, struct capidev, list);
1459 for (np=cdev->nccis; np; np = np->next) { 1456 for (np=cdev->nccis; np; np = np->next) {
1460 len += sprintf(page+len, "%d 0x%x\n", 1457 seq_printf(m, "%d 0x%x\n",
1461 cdev->ap.applid, 1458 cdev->ap.applid,
1462 np->ncci); 1459 np->ncci);
1463 if (len <= off) {
1464 off -= len;
1465 len = 0;
1466 } else {
1467 if (len-off > count)
1468 goto endloop;
1469 }
1470 } 1460 }
1471 } 1461 }
1472endloop:
1473 read_unlock(&capidev_list_lock); 1462 read_unlock(&capidev_list_lock);
1474 *start = page+off; 1463 return 0;
1475 if (len < count)
1476 *eof = 1;
1477 if (len>count) len = count;
1478 if (len<0) len = 0;
1479 return len;
1480} 1464}
1481 1465
1482static struct procfsentries { 1466static int capi20ncci_proc_open(struct inode *inode, struct file *file)
1483 char *name; 1467{
1484 mode_t mode; 1468 return single_open(file, capi20ncci_proc_show, NULL);
1485 int (*read_proc)(char *page, char **start, off_t off, 1469}
1486 int count, int *eof, void *data); 1470
1487 struct proc_dir_entry *procent; 1471static const struct file_operations capi20ncci_proc_fops = {
1488} procfsentries[] = { 1472 .owner = THIS_MODULE,
1489 /* { "capi", S_IFDIR, 0 }, */ 1473 .open = capi20ncci_proc_open,
1490 { "capi/capi20", 0 , proc_capidev_read_proc }, 1474 .read = seq_read,
1491 { "capi/capi20ncci", 0 , proc_capincci_read_proc }, 1475 .llseek = seq_lseek,
1476 .release = single_release,
1492}; 1477};
1493 1478
1494static void __init proc_init(void) 1479static void __init proc_init(void)
1495{ 1480{
1496 int nelem = ARRAY_SIZE(procfsentries); 1481 proc_create("capi/capi20", 0, NULL, &capi20_proc_fops);
1497 int i; 1482 proc_create("capi/capi20ncci", 0, NULL, &capi20ncci_proc_fops);
1498
1499 for (i=0; i < nelem; i++) {
1500 struct procfsentries *p = procfsentries + i;
1501 p->procent = create_proc_entry(p->name, p->mode, NULL);
1502 if (p->procent) p->procent->read_proc = p->read_proc;
1503 }
1504} 1483}
1505 1484
1506static void __exit proc_exit(void) 1485static void __exit proc_exit(void)
1507{ 1486{
1508 int nelem = ARRAY_SIZE(procfsentries); 1487 remove_proc_entry("capi/capi20", NULL);
1509 int i; 1488 remove_proc_entry("capi/capi20ncci", NULL);
1510
1511 for (i=nelem-1; i >= 0; i--) {
1512 struct procfsentries *p = procfsentries + i;
1513 if (p->procent) {
1514 remove_proc_entry(p->name, NULL);
1515 p->procent = NULL;
1516 }
1517 }
1518} 1489}
1519 1490
1520/* -------- init function and module interface ---------------------- */ 1491/* -------- init function and module interface ---------------------- */
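The capi.c conversion above is the template repeated through the rest of this series: drop the hand-rolled off/count/eof bookkeeping, emit each line with seq_printf() from a show function, wrap it with single_open(), and register the resulting const file_operations with proc_create(). A condensed, self-contained sketch of that recipe, using hypothetical demo_* names that are not part of the patch:

#include <linux/init.h>
#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>

static int demo_proc_show(struct seq_file *m, void *v)
{
	/* buffering, offsets and EOF are handled by the seq_file core */
	seq_printf(m, "hello from seq_file\n");
	return 0;
}

static int demo_proc_open(struct inode *inode, struct file *file)
{
	return single_open(file, demo_proc_show, NULL);
}

static const struct file_operations demo_proc_fops = {
	.owner   = THIS_MODULE,
	.open    = demo_proc_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = single_release,
};

static int __init demo_init(void)
{
	proc_create("demo", 0, NULL, &demo_proc_fops);
	return 0;
}

static void __exit demo_exit(void)
{
	remove_proc_entry("demo", NULL);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");

The payoff is visible in the hunk itself: the partial-read arithmetic and its truncation corner cases disappear, and the show function only has to produce output.
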
diff --git a/drivers/isdn/capi/capidrv.c b/drivers/isdn/capi/capidrv.c
index 66b7d7a8647..bb450152fb7 100644
--- a/drivers/isdn/capi/capidrv.c
+++ b/drivers/isdn/capi/capidrv.c
@@ -24,6 +24,7 @@
24#include <linux/isdn.h> 24#include <linux/isdn.h>
25#include <linux/isdnif.h> 25#include <linux/isdnif.h>
26#include <linux/proc_fs.h> 26#include <linux/proc_fs.h>
27#include <linux/seq_file.h>
27#include <linux/capi.h> 28#include <linux/capi.h>
28#include <linux/kernelcapi.h> 29#include <linux/kernelcapi.h>
29#include <linux/ctype.h> 30#include <linux/ctype.h>
@@ -2229,59 +2230,37 @@ static void lower_callback(unsigned int cmd, u32 contr, void *data)
2229 * /proc/capi/capidrv: 2230 * /proc/capi/capidrv:
2230 * nrecvctlpkt nrecvdatapkt nsendctlpkt nsenddatapkt 2231 * nrecvctlpkt nrecvdatapkt nsendctlpkt nsenddatapkt
2231 */ 2232 */
2232static int proc_capidrv_read_proc(char *page, char **start, off_t off, 2233static int capidrv_proc_show(struct seq_file *m, void *v)
2233 int count, int *eof, void *data)
2234{ 2234{
2235 int len = 0; 2235 seq_printf(m, "%lu %lu %lu %lu\n",
2236
2237 len += sprintf(page+len, "%lu %lu %lu %lu\n",
2238 global.ap.nrecvctlpkt, 2236 global.ap.nrecvctlpkt,
2239 global.ap.nrecvdatapkt, 2237 global.ap.nrecvdatapkt,
2240 global.ap.nsentctlpkt, 2238 global.ap.nsentctlpkt,
2241 global.ap.nsentdatapkt); 2239 global.ap.nsentdatapkt);
2242 if (off+count >= len) 2240 return 0;
2243 *eof = 1; 2241}
2244 if (len < off) 2242
2245 return 0; 2243static int capidrv_proc_open(struct inode *inode, struct file *file)
2246 *start = page + off; 2244{
2247 return ((count < len-off) ? count : len-off); 2245 return single_open(file, capidrv_proc_show, NULL);
2248} 2246}
2249 2247
2250static struct procfsentries { 2248static const struct file_operations capidrv_proc_fops = {
2251 char *name; 2249 .owner = THIS_MODULE,
2252 mode_t mode; 2250 .open = capidrv_proc_open,
2253 int (*read_proc)(char *page, char **start, off_t off, 2251 .read = seq_read,
2254 int count, int *eof, void *data); 2252 .llseek = seq_lseek,
2255 struct proc_dir_entry *procent; 2253 .release = single_release,
2256} procfsentries[] = {
2257 /* { "capi", S_IFDIR, 0 }, */
2258 { "capi/capidrv", 0 , proc_capidrv_read_proc },
2259}; 2254};
2260 2255
2261static void __init proc_init(void) 2256static void __init proc_init(void)
2262{ 2257{
2263 int nelem = ARRAY_SIZE(procfsentries); 2258 proc_create("capi/capidrv", 0, NULL, &capidrv_proc_fops);
2264 int i;
2265
2266 for (i=0; i < nelem; i++) {
2267 struct procfsentries *p = procfsentries + i;
2268 p->procent = create_proc_entry(p->name, p->mode, NULL);
2269 if (p->procent) p->procent->read_proc = p->read_proc;
2270 }
2271} 2259}
2272 2260
2273static void __exit proc_exit(void) 2261static void __exit proc_exit(void)
2274{ 2262{
2275 int nelem = ARRAY_SIZE(procfsentries); 2263 remove_proc_entry("capi/capidrv", NULL);
2276 int i;
2277
2278 for (i=nelem-1; i >= 0; i--) {
2279 struct procfsentries *p = procfsentries + i;
2280 if (p->procent) {
2281 remove_proc_entry(p->name, NULL);
2282 p->procent = NULL;
2283 }
2284 }
2285} 2264}
2286 2265
2287static int __init capidrv_init(void) 2266static int __init capidrv_init(void)
diff --git a/drivers/isdn/capi/kcapi.c b/drivers/isdn/capi/kcapi.c
index dc506ab99ca..b0bacf377c1 100644
--- a/drivers/isdn/capi/kcapi.c
+++ b/drivers/isdn/capi/kcapi.c
@@ -490,13 +490,7 @@ attach_capi_ctr(struct capi_ctr *card)
490 card->traceflag = showcapimsgs; 490 card->traceflag = showcapimsgs;
491 491
492 sprintf(card->procfn, "capi/controllers/%d", card->cnr); 492 sprintf(card->procfn, "capi/controllers/%d", card->cnr);
493 card->procent = create_proc_entry(card->procfn, 0, NULL); 493 card->procent = proc_create_data(card->procfn, 0, NULL, card->proc_fops, card);
494 if (card->procent) {
495 card->procent->read_proc =
496 (int (*)(char *,char **,off_t,int,int *,void *))
497 card->ctr_read_proc;
498 card->procent->data = card;
499 }
500 494
501 ncards++; 495 ncards++;
502 printk(KERN_NOTICE "kcapi: Controller [%03d]: %s attached\n", 496 printk(KERN_NOTICE "kcapi: Controller [%03d]: %s attached\n",
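In the kcapi hunk the per-controller pointer now travels through proc_create_data(), which stores it in the proc entry's ->data field and so replaces the manual procent->read_proc / procent->data assignments and the function-pointer cast they required. A brief sketch of the registration side only, with my_card as a hypothetical stand-in for the controller structure:

#include <linux/proc_fs.h>

struct my_card {				/* hypothetical stand-in */
	const struct file_operations *proc_fops;
	char procfn[64];
};

static void my_card_add_proc(struct my_card *card)
{
	/* card is retrievable later as PDE(inode)->data in the open routine */
	proc_create_data(card->procfn, 0, NULL, card->proc_fops, card);
}
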
diff --git a/drivers/isdn/gigaset/capi.c b/drivers/isdn/gigaset/capi.c
index 3f5cd06af10..6f0ae32906b 100644
--- a/drivers/isdn/gigaset/capi.c
+++ b/drivers/isdn/gigaset/capi.c
@@ -13,6 +13,8 @@
13 13
14#include "gigaset.h" 14#include "gigaset.h"
15#include <linux/ctype.h> 15#include <linux/ctype.h>
16#include <linux/proc_fs.h>
17#include <linux/seq_file.h>
16#include <linux/isdn/capilli.h> 18#include <linux/isdn/capilli.h>
17#include <linux/isdn/capicmd.h> 19#include <linux/isdn/capicmd.h>
18#include <linux/isdn/capiutil.h> 20#include <linux/isdn/capiutil.h>
@@ -2106,35 +2108,22 @@ static char *gigaset_procinfo(struct capi_ctr *ctr)
2106 return ctr->name; /* ToDo: more? */ 2108 return ctr->name; /* ToDo: more? */
2107} 2109}
2108 2110
2109/** 2111static int gigaset_proc_show(struct seq_file *m, void *v)
2110 * gigaset_ctr_read_proc() - build controller proc file entry
2111 * @page: buffer of PAGE_SIZE bytes for receiving the entry.
2112 * @start: unused.
2113 * @off: unused.
2114 * @count: unused.
2115 * @eof: unused.
2116 * @ctr: controller descriptor structure.
2117 *
2118 * Return value: length of generated entry
2119 */
2120static int gigaset_ctr_read_proc(char *page, char **start, off_t off,
2121 int count, int *eof, struct capi_ctr *ctr)
2122{ 2112{
2113 struct capi_ctr *ctr = m->private;
2123 struct cardstate *cs = ctr->driverdata; 2114 struct cardstate *cs = ctr->driverdata;
2124 char *s; 2115 char *s;
2125 int i; 2116 int i;
2126 int len = 0; 2117
2127 len += sprintf(page+len, "%-16s %s\n", "name", ctr->name); 2118 seq_printf(m, "%-16s %s\n", "name", ctr->name);
2128 len += sprintf(page+len, "%-16s %s %s\n", "dev", 2119 seq_printf(m, "%-16s %s %s\n", "dev",
2129 dev_driver_string(cs->dev), dev_name(cs->dev)); 2120 dev_driver_string(cs->dev), dev_name(cs->dev));
2130 len += sprintf(page+len, "%-16s %d\n", "id", cs->myid); 2121 seq_printf(m, "%-16s %d\n", "id", cs->myid);
2131 if (cs->gotfwver) 2122 if (cs->gotfwver)
2132 len += sprintf(page+len, "%-16s %d.%d.%d.%d\n", "firmware", 2123 seq_printf(m, "%-16s %d.%d.%d.%d\n", "firmware",
2133 cs->fwver[0], cs->fwver[1], cs->fwver[2], cs->fwver[3]); 2124 cs->fwver[0], cs->fwver[1], cs->fwver[2], cs->fwver[3]);
2134 len += sprintf(page+len, "%-16s %d\n", "channels", 2125 seq_printf(m, "%-16s %d\n", "channels", cs->channels);
2135 cs->channels); 2126 seq_printf(m, "%-16s %s\n", "onechannel", cs->onechannel ? "yes" : "no");
2136 len += sprintf(page+len, "%-16s %s\n", "onechannel",
2137 cs->onechannel ? "yes" : "no");
2138 2127
2139 switch (cs->mode) { 2128 switch (cs->mode) {
2140 case M_UNKNOWN: 2129 case M_UNKNOWN:
@@ -2152,7 +2141,7 @@ static int gigaset_ctr_read_proc(char *page, char **start, off_t off,
2152 default: 2141 default:
2153 s = "??"; 2142 s = "??";
2154 } 2143 }
2155 len += sprintf(page+len, "%-16s %s\n", "mode", s); 2144 seq_printf(m, "%-16s %s\n", "mode", s);
2156 2145
2157 switch (cs->mstate) { 2146 switch (cs->mstate) {
2158 case MS_UNINITIALIZED: 2147 case MS_UNINITIALIZED:
@@ -2176,25 +2165,21 @@ static int gigaset_ctr_read_proc(char *page, char **start, off_t off,
2176 default: 2165 default:
2177 s = "??"; 2166 s = "??";
2178 } 2167 }
2179 len += sprintf(page+len, "%-16s %s\n", "mstate", s); 2168 seq_printf(m, "%-16s %s\n", "mstate", s);
2180 2169
2181 len += sprintf(page+len, "%-16s %s\n", "running", 2170 seq_printf(m, "%-16s %s\n", "running", cs->running ? "yes" : "no");
2182 cs->running ? "yes" : "no"); 2171 seq_printf(m, "%-16s %s\n", "connected", cs->connected ? "yes" : "no");
2183 len += sprintf(page+len, "%-16s %s\n", "connected", 2172 seq_printf(m, "%-16s %s\n", "isdn_up", cs->isdn_up ? "yes" : "no");
2184 cs->connected ? "yes" : "no"); 2173 seq_printf(m, "%-16s %s\n", "cidmode", cs->cidmode ? "yes" : "no");
2185 len += sprintf(page+len, "%-16s %s\n", "isdn_up",
2186 cs->isdn_up ? "yes" : "no");
2187 len += sprintf(page+len, "%-16s %s\n", "cidmode",
2188 cs->cidmode ? "yes" : "no");
2189 2174
2190 for (i = 0; i < cs->channels; i++) { 2175 for (i = 0; i < cs->channels; i++) {
2191 len += sprintf(page+len, "[%d]%-13s %d\n", i, "corrupted", 2176 seq_printf(m, "[%d]%-13s %d\n", i, "corrupted",
2192 cs->bcs[i].corrupted); 2177 cs->bcs[i].corrupted);
2193 len += sprintf(page+len, "[%d]%-13s %d\n", i, "trans_down", 2178 seq_printf(m, "[%d]%-13s %d\n", i, "trans_down",
2194 cs->bcs[i].trans_down); 2179 cs->bcs[i].trans_down);
2195 len += sprintf(page+len, "[%d]%-13s %d\n", i, "trans_up", 2180 seq_printf(m, "[%d]%-13s %d\n", i, "trans_up",
2196 cs->bcs[i].trans_up); 2181 cs->bcs[i].trans_up);
2197 len += sprintf(page+len, "[%d]%-13s %d\n", i, "chstate", 2182 seq_printf(m, "[%d]%-13s %d\n", i, "chstate",
2198 cs->bcs[i].chstate); 2183 cs->bcs[i].chstate);
2199 switch (cs->bcs[i].proto2) { 2184 switch (cs->bcs[i].proto2) {
2200 case L2_BITSYNC: 2185 case L2_BITSYNC:
@@ -2209,11 +2194,23 @@ static int gigaset_ctr_read_proc(char *page, char **start, off_t off,
2209 default: 2194 default:
2210 s = "??"; 2195 s = "??";
2211 } 2196 }
2212 len += sprintf(page+len, "[%d]%-13s %s\n", i, "proto2", s); 2197 seq_printf(m, "[%d]%-13s %s\n", i, "proto2", s);
2213 } 2198 }
2214 return len; 2199 return 0;
2215} 2200}
2216 2201
2202static int gigaset_proc_open(struct inode *inode, struct file *file)
2203{
2204 return single_open(file, gigaset_proc_show, PDE(inode)->data);
2205}
2206
2207static const struct file_operations gigaset_proc_fops = {
2208 .owner = THIS_MODULE,
2209 .open = gigaset_proc_open,
2210 .read = seq_read,
2211 .llseek = seq_lseek,
2212 .release = single_release,
2213};
2217 2214
2218static struct capi_driver capi_driver_gigaset = { 2215static struct capi_driver capi_driver_gigaset = {
2219 .name = "gigaset", 2216 .name = "gigaset",
@@ -2256,7 +2253,7 @@ int gigaset_isdn_register(struct cardstate *cs, const char *isdnid)
2256 iif->ctr.release_appl = gigaset_release_appl; 2253 iif->ctr.release_appl = gigaset_release_appl;
2257 iif->ctr.send_message = gigaset_send_message; 2254 iif->ctr.send_message = gigaset_send_message;
2258 iif->ctr.procinfo = gigaset_procinfo; 2255 iif->ctr.procinfo = gigaset_procinfo;
2259 iif->ctr.ctr_read_proc = gigaset_ctr_read_proc; 2256 iif->ctr.proc_fops = &gigaset_proc_fops;
2260 INIT_LIST_HEAD(&iif->appls); 2257 INIT_LIST_HEAD(&iif->appls);
2261 skb_queue_head_init(&iif->sendqueue); 2258 skb_queue_head_init(&iif->sendqueue);
2262 atomic_set(&iif->sendqlen, 0); 2259 atomic_set(&iif->sendqlen, 0);
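The gigaset hunk shows the matching read side: the open routine forwards PDE(inode)->data into single_open(), and the show routine receives it back as m->private. Reduced to just those two steps (demo_* names are hypothetical; struct capi_ctr is the real kernelcapi descriptor):

#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/isdn/capilli.h>	/* struct capi_ctr */

static int demo_ctr_show(struct seq_file *m, void *v)
{
	struct capi_ctr *ctr = m->private;	/* handed over by single_open() */

	seq_printf(m, "%-16s %s\n", "name", ctr->name);
	return 0;
}

static int demo_ctr_open(struct inode *inode, struct file *file)
{
	return single_open(file, demo_ctr_show, PDE(inode)->data);
}
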
diff --git a/drivers/isdn/hardware/avm/avmcard.h b/drivers/isdn/hardware/avm/avmcard.h
index d964f07e4a5..a70e8854461 100644
--- a/drivers/isdn/hardware/avm/avmcard.h
+++ b/drivers/isdn/hardware/avm/avmcard.h
@@ -556,8 +556,7 @@ u16 b1_send_message(struct capi_ctr *ctrl, struct sk_buff *skb);
556void b1_parse_version(avmctrl_info *card); 556void b1_parse_version(avmctrl_info *card);
557irqreturn_t b1_interrupt(int interrupt, void *devptr); 557irqreturn_t b1_interrupt(int interrupt, void *devptr);
558 558
559int b1ctl_read_proc(char *page, char **start, off_t off, 559extern const struct file_operations b1ctl_proc_fops;
560 int count, int *eof, struct capi_ctr *ctrl);
561 560
562avmcard_dmainfo *avmcard_dma_alloc(char *name, struct pci_dev *, 561avmcard_dmainfo *avmcard_dma_alloc(char *name, struct pci_dev *,
563 long rsize, long ssize); 562 long rsize, long ssize);
@@ -577,7 +576,6 @@ void b1dma_register_appl(struct capi_ctr *ctrl,
577 capi_register_params *rp); 576 capi_register_params *rp);
578void b1dma_release_appl(struct capi_ctr *ctrl, u16 appl); 577void b1dma_release_appl(struct capi_ctr *ctrl, u16 appl);
579u16 b1dma_send_message(struct capi_ctr *ctrl, struct sk_buff *skb); 578u16 b1dma_send_message(struct capi_ctr *ctrl, struct sk_buff *skb);
580int b1dmactl_read_proc(char *page, char **start, off_t off, 579extern const struct file_operations b1dmactl_proc_fops;
581 int count, int *eof, struct capi_ctr *ctrl);
582 580
583#endif /* _AVMCARD_H_ */ 581#endif /* _AVMCARD_H_ */
diff --git a/drivers/isdn/hardware/avm/b1.c b/drivers/isdn/hardware/avm/b1.c
index a7c0083e78a..c38fa0f4c72 100644
--- a/drivers/isdn/hardware/avm/b1.c
+++ b/drivers/isdn/hardware/avm/b1.c
@@ -12,6 +12,8 @@
12#include <linux/module.h> 12#include <linux/module.h>
13#include <linux/kernel.h> 13#include <linux/kernel.h>
14#include <linux/pci.h> 14#include <linux/pci.h>
15#include <linux/proc_fs.h>
16#include <linux/seq_file.h>
15#include <linux/skbuff.h> 17#include <linux/skbuff.h>
16#include <linux/delay.h> 18#include <linux/delay.h>
17#include <linux/mm.h> 19#include <linux/mm.h>
@@ -634,18 +636,17 @@ irqreturn_t b1_interrupt(int interrupt, void *devptr)
634} 636}
635 637
636/* ------------------------------------------------------------- */ 638/* ------------------------------------------------------------- */
637int b1ctl_read_proc(char *page, char **start, off_t off, 639static int b1ctl_proc_show(struct seq_file *m, void *v)
638 int count, int *eof, struct capi_ctr *ctrl)
639{ 640{
641 struct capi_ctr *ctrl = m->private;
640 avmctrl_info *cinfo = (avmctrl_info *)(ctrl->driverdata); 642 avmctrl_info *cinfo = (avmctrl_info *)(ctrl->driverdata);
641 avmcard *card = cinfo->card; 643 avmcard *card = cinfo->card;
642 u8 flag; 644 u8 flag;
643 int len = 0;
644 char *s; 645 char *s;
645 646
646 len += sprintf(page+len, "%-16s %s\n", "name", card->name); 647 seq_printf(m, "%-16s %s\n", "name", card->name);
647 len += sprintf(page+len, "%-16s 0x%x\n", "io", card->port); 648 seq_printf(m, "%-16s 0x%x\n", "io", card->port);
648 len += sprintf(page+len, "%-16s %d\n", "irq", card->irq); 649 seq_printf(m, "%-16s %d\n", "irq", card->irq);
649 switch (card->cardtype) { 650 switch (card->cardtype) {
650 case avm_b1isa: s = "B1 ISA"; break; 651 case avm_b1isa: s = "B1 ISA"; break;
651 case avm_b1pci: s = "B1 PCI"; break; 652 case avm_b1pci: s = "B1 PCI"; break;
@@ -658,20 +659,20 @@ int b1ctl_read_proc(char *page, char **start, off_t off,
658 case avm_c2: s = "C2"; break; 659 case avm_c2: s = "C2"; break;
659 default: s = "???"; break; 660 default: s = "???"; break;
660 } 661 }
661 len += sprintf(page+len, "%-16s %s\n", "type", s); 662 seq_printf(m, "%-16s %s\n", "type", s);
662 if (card->cardtype == avm_t1isa) 663 if (card->cardtype == avm_t1isa)
663 len += sprintf(page+len, "%-16s %d\n", "cardnr", card->cardnr); 664 seq_printf(m, "%-16s %d\n", "cardnr", card->cardnr);
664 if ((s = cinfo->version[VER_DRIVER]) != NULL) 665 if ((s = cinfo->version[VER_DRIVER]) != NULL)
665 len += sprintf(page+len, "%-16s %s\n", "ver_driver", s); 666 seq_printf(m, "%-16s %s\n", "ver_driver", s);
666 if ((s = cinfo->version[VER_CARDTYPE]) != NULL) 667 if ((s = cinfo->version[VER_CARDTYPE]) != NULL)
667 len += sprintf(page+len, "%-16s %s\n", "ver_cardtype", s); 668 seq_printf(m, "%-16s %s\n", "ver_cardtype", s);
668 if ((s = cinfo->version[VER_SERIAL]) != NULL) 669 if ((s = cinfo->version[VER_SERIAL]) != NULL)
669 len += sprintf(page+len, "%-16s %s\n", "ver_serial", s); 670 seq_printf(m, "%-16s %s\n", "ver_serial", s);
670 671
671 if (card->cardtype != avm_m1) { 672 if (card->cardtype != avm_m1) {
672 flag = ((u8 *)(ctrl->profile.manu))[3]; 673 flag = ((u8 *)(ctrl->profile.manu))[3];
673 if (flag) 674 if (flag)
674 len += sprintf(page+len, "%-16s%s%s%s%s%s%s%s\n", 675 seq_printf(m, "%-16s%s%s%s%s%s%s%s\n",
675 "protocol", 676 "protocol",
676 (flag & 0x01) ? " DSS1" : "", 677 (flag & 0x01) ? " DSS1" : "",
677 (flag & 0x02) ? " CT1" : "", 678 (flag & 0x02) ? " CT1" : "",
@@ -685,7 +686,7 @@ int b1ctl_read_proc(char *page, char **start, off_t off,
685 if (card->cardtype != avm_m1) { 686 if (card->cardtype != avm_m1) {
686 flag = ((u8 *)(ctrl->profile.manu))[5]; 687 flag = ((u8 *)(ctrl->profile.manu))[5];
687 if (flag) 688 if (flag)
688 len += sprintf(page+len, "%-16s%s%s%s%s\n", 689 seq_printf(m, "%-16s%s%s%s%s\n",
689 "linetype", 690 "linetype",
690 (flag & 0x01) ? " point to point" : "", 691 (flag & 0x01) ? " point to point" : "",
691 (flag & 0x02) ? " point to multipoint" : "", 692 (flag & 0x02) ? " point to multipoint" : "",
@@ -693,16 +694,25 @@ int b1ctl_read_proc(char *page, char **start, off_t off,
693 (flag & 0x04) ? " leased line with D-channel" : "" 694 (flag & 0x04) ? " leased line with D-channel" : ""
694 ); 695 );
695 } 696 }
696 len += sprintf(page+len, "%-16s %s\n", "cardname", cinfo->cardname); 697 seq_printf(m, "%-16s %s\n", "cardname", cinfo->cardname);
697 698
698 if (off+count >= len) 699 return 0;
699 *eof = 1; 700}
700 if (len < off) 701
701 return 0; 702static int b1ctl_proc_open(struct inode *inode, struct file *file)
702 *start = page + off; 703{
703 return ((count < len-off) ? count : len-off); 704 return single_open(file, b1ctl_proc_show, PDE(inode)->data);
704} 705}
705 706
707const struct file_operations b1ctl_proc_fops = {
708 .owner = THIS_MODULE,
709 .open = b1ctl_proc_open,
710 .read = seq_read,
711 .llseek = seq_lseek,
712 .release = single_release,
713};
714EXPORT_SYMBOL(b1ctl_proc_fops);
715
706/* ------------------------------------------------------------- */ 716/* ------------------------------------------------------------- */
707 717
708#ifdef CONFIG_PCI 718#ifdef CONFIG_PCI
@@ -781,8 +791,6 @@ EXPORT_SYMBOL(b1_send_message);
781EXPORT_SYMBOL(b1_parse_version); 791EXPORT_SYMBOL(b1_parse_version);
782EXPORT_SYMBOL(b1_interrupt); 792EXPORT_SYMBOL(b1_interrupt);
783 793
784EXPORT_SYMBOL(b1ctl_read_proc);
785
786static int __init b1_init(void) 794static int __init b1_init(void)
787{ 795{
788 char *p; 796 char *p;
diff --git a/drivers/isdn/hardware/avm/b1dma.c b/drivers/isdn/hardware/avm/b1dma.c
index 0e84aaae43f..124550d0dbf 100644
--- a/drivers/isdn/hardware/avm/b1dma.c
+++ b/drivers/isdn/hardware/avm/b1dma.c
@@ -11,6 +11,8 @@
11 11
12#include <linux/module.h> 12#include <linux/module.h>
13#include <linux/kernel.h> 13#include <linux/kernel.h>
14#include <linux/proc_fs.h>
15#include <linux/seq_file.h>
14#include <linux/skbuff.h> 16#include <linux/skbuff.h>
15#include <linux/delay.h> 17#include <linux/delay.h>
16#include <linux/mm.h> 18#include <linux/mm.h>
@@ -855,21 +857,20 @@ u16 b1dma_send_message(struct capi_ctr *ctrl, struct sk_buff *skb)
855 857
856/* ------------------------------------------------------------- */ 858/* ------------------------------------------------------------- */
857 859
858int b1dmactl_read_proc(char *page, char **start, off_t off, 860static int b1dmactl_proc_show(struct seq_file *m, void *v)
859 int count, int *eof, struct capi_ctr *ctrl)
860{ 861{
862 struct capi_ctr *ctrl = m->private;
861 avmctrl_info *cinfo = (avmctrl_info *)(ctrl->driverdata); 863 avmctrl_info *cinfo = (avmctrl_info *)(ctrl->driverdata);
862 avmcard *card = cinfo->card; 864 avmcard *card = cinfo->card;
863 u8 flag; 865 u8 flag;
864 int len = 0;
865 char *s; 866 char *s;
866 u32 txoff, txlen, rxoff, rxlen, csr; 867 u32 txoff, txlen, rxoff, rxlen, csr;
867 unsigned long flags; 868 unsigned long flags;
868 869
869 len += sprintf(page+len, "%-16s %s\n", "name", card->name); 870 seq_printf(m, "%-16s %s\n", "name", card->name);
870 len += sprintf(page+len, "%-16s 0x%x\n", "io", card->port); 871 seq_printf(m, "%-16s 0x%x\n", "io", card->port);
871 len += sprintf(page+len, "%-16s %d\n", "irq", card->irq); 872 seq_printf(m, "%-16s %d\n", "irq", card->irq);
872 len += sprintf(page+len, "%-16s 0x%lx\n", "membase", card->membase); 873 seq_printf(m, "%-16s 0x%lx\n", "membase", card->membase);
873 switch (card->cardtype) { 874 switch (card->cardtype) {
874 case avm_b1isa: s = "B1 ISA"; break; 875 case avm_b1isa: s = "B1 ISA"; break;
875 case avm_b1pci: s = "B1 PCI"; break; 876 case avm_b1pci: s = "B1 PCI"; break;
@@ -882,18 +883,18 @@ int b1dmactl_read_proc(char *page, char **start, off_t off,
882 case avm_c2: s = "C2"; break; 883 case avm_c2: s = "C2"; break;
883 default: s = "???"; break; 884 default: s = "???"; break;
884 } 885 }
885 len += sprintf(page+len, "%-16s %s\n", "type", s); 886 seq_printf(m, "%-16s %s\n", "type", s);
886 if ((s = cinfo->version[VER_DRIVER]) != NULL) 887 if ((s = cinfo->version[VER_DRIVER]) != NULL)
887 len += sprintf(page+len, "%-16s %s\n", "ver_driver", s); 888 seq_printf(m, "%-16s %s\n", "ver_driver", s);
888 if ((s = cinfo->version[VER_CARDTYPE]) != NULL) 889 if ((s = cinfo->version[VER_CARDTYPE]) != NULL)
889 len += sprintf(page+len, "%-16s %s\n", "ver_cardtype", s); 890 seq_printf(m, "%-16s %s\n", "ver_cardtype", s);
890 if ((s = cinfo->version[VER_SERIAL]) != NULL) 891 if ((s = cinfo->version[VER_SERIAL]) != NULL)
891 len += sprintf(page+len, "%-16s %s\n", "ver_serial", s); 892 seq_printf(m, "%-16s %s\n", "ver_serial", s);
892 893
893 if (card->cardtype != avm_m1) { 894 if (card->cardtype != avm_m1) {
894 flag = ((u8 *)(ctrl->profile.manu))[3]; 895 flag = ((u8 *)(ctrl->profile.manu))[3];
895 if (flag) 896 if (flag)
896 len += sprintf(page+len, "%-16s%s%s%s%s%s%s%s\n", 897 seq_printf(m, "%-16s%s%s%s%s%s%s%s\n",
897 "protocol", 898 "protocol",
898 (flag & 0x01) ? " DSS1" : "", 899 (flag & 0x01) ? " DSS1" : "",
899 (flag & 0x02) ? " CT1" : "", 900 (flag & 0x02) ? " CT1" : "",
@@ -907,7 +908,7 @@ int b1dmactl_read_proc(char *page, char **start, off_t off,
907 if (card->cardtype != avm_m1) { 908 if (card->cardtype != avm_m1) {
908 flag = ((u8 *)(ctrl->profile.manu))[5]; 909 flag = ((u8 *)(ctrl->profile.manu))[5];
909 if (flag) 910 if (flag)
910 len += sprintf(page+len, "%-16s%s%s%s%s\n", 911 seq_printf(m, "%-16s%s%s%s%s\n",
911 "linetype", 912 "linetype",
912 (flag & 0x01) ? " point to point" : "", 913 (flag & 0x01) ? " point to point" : "",
913 (flag & 0x02) ? " point to multipoint" : "", 914 (flag & 0x02) ? " point to multipoint" : "",
@@ -915,7 +916,7 @@ int b1dmactl_read_proc(char *page, char **start, off_t off,
915 (flag & 0x04) ? " leased line with D-channel" : "" 916 (flag & 0x04) ? " leased line with D-channel" : ""
916 ); 917 );
917 } 918 }
918 len += sprintf(page+len, "%-16s %s\n", "cardname", cinfo->cardname); 919 seq_printf(m, "%-16s %s\n", "cardname", cinfo->cardname);
919 920
920 921
921 spin_lock_irqsave(&card->lock, flags); 922 spin_lock_irqsave(&card->lock, flags);
@@ -930,27 +931,30 @@ int b1dmactl_read_proc(char *page, char **start, off_t off,
930 931
931 spin_unlock_irqrestore(&card->lock, flags); 932 spin_unlock_irqrestore(&card->lock, flags);
932 933
933 len += sprintf(page+len, "%-16s 0x%lx\n", 934 seq_printf(m, "%-16s 0x%lx\n", "csr (cached)", (unsigned long)card->csr);
934 "csr (cached)", (unsigned long)card->csr); 935 seq_printf(m, "%-16s 0x%lx\n", "csr", (unsigned long)csr);
935 len += sprintf(page+len, "%-16s 0x%lx\n", 936 seq_printf(m, "%-16s %lu\n", "txoff", (unsigned long)txoff);
936 "csr", (unsigned long)csr); 937 seq_printf(m, "%-16s %lu\n", "txlen", (unsigned long)txlen);
937 len += sprintf(page+len, "%-16s %lu\n", 938 seq_printf(m, "%-16s %lu\n", "rxoff", (unsigned long)rxoff);
938 "txoff", (unsigned long)txoff); 939 seq_printf(m, "%-16s %lu\n", "rxlen", (unsigned long)rxlen);
939 len += sprintf(page+len, "%-16s %lu\n", 940
940 "txlen", (unsigned long)txlen); 941 return 0;
941 len += sprintf(page+len, "%-16s %lu\n", 942}
942 "rxoff", (unsigned long)rxoff); 943
943 len += sprintf(page+len, "%-16s %lu\n", 944static int b1dmactl_proc_open(struct inode *inode, struct file *file)
944 "rxlen", (unsigned long)rxlen); 945{
945 946 return single_open(file, b1dmactl_proc_show, PDE(inode)->data);
946 if (off+count >= len)
947 *eof = 1;
948 if (len < off)
949 return 0;
950 *start = page + off;
951 return ((count < len-off) ? count : len-off);
952} 947}
953 948
949const struct file_operations b1dmactl_proc_fops = {
950 .owner = THIS_MODULE,
951 .open = b1dmactl_proc_open,
952 .read = seq_read,
953 .llseek = seq_lseek,
954 .release = single_release,
955};
956EXPORT_SYMBOL(b1dmactl_proc_fops);
957
954/* ------------------------------------------------------------- */ 958/* ------------------------------------------------------------- */
955 959
956EXPORT_SYMBOL(b1dma_reset); 960EXPORT_SYMBOL(b1dma_reset);
@@ -963,7 +967,6 @@ EXPORT_SYMBOL(b1dma_reset_ctr);
963EXPORT_SYMBOL(b1dma_register_appl); 967EXPORT_SYMBOL(b1dma_register_appl);
964EXPORT_SYMBOL(b1dma_release_appl); 968EXPORT_SYMBOL(b1dma_release_appl);
965EXPORT_SYMBOL(b1dma_send_message); 969EXPORT_SYMBOL(b1dma_send_message);
966EXPORT_SYMBOL(b1dmactl_read_proc);
967 970
968static int __init b1dma_init(void) 971static int __init b1dma_init(void)
969{ 972{
diff --git a/drivers/isdn/hardware/avm/b1isa.c b/drivers/isdn/hardware/avm/b1isa.c
index 6461a32bc83..ff5390546f9 100644
--- a/drivers/isdn/hardware/avm/b1isa.c
+++ b/drivers/isdn/hardware/avm/b1isa.c
@@ -121,7 +121,7 @@ static int b1isa_probe(struct pci_dev *pdev)
121 cinfo->capi_ctrl.load_firmware = b1_load_firmware; 121 cinfo->capi_ctrl.load_firmware = b1_load_firmware;
122 cinfo->capi_ctrl.reset_ctr = b1_reset_ctr; 122 cinfo->capi_ctrl.reset_ctr = b1_reset_ctr;
123 cinfo->capi_ctrl.procinfo = b1isa_procinfo; 123 cinfo->capi_ctrl.procinfo = b1isa_procinfo;
124 cinfo->capi_ctrl.ctr_read_proc = b1ctl_read_proc; 124 cinfo->capi_ctrl.proc_fops = &b1ctl_proc_fops;
125 strcpy(cinfo->capi_ctrl.name, card->name); 125 strcpy(cinfo->capi_ctrl.name, card->name);
126 126
127 retval = attach_capi_ctr(&cinfo->capi_ctrl); 127 retval = attach_capi_ctr(&cinfo->capi_ctrl);
diff --git a/drivers/isdn/hardware/avm/b1pci.c b/drivers/isdn/hardware/avm/b1pci.c
index 5b314a2c404..c97e4315079 100644
--- a/drivers/isdn/hardware/avm/b1pci.c
+++ b/drivers/isdn/hardware/avm/b1pci.c
@@ -112,7 +112,7 @@ static int b1pci_probe(struct capicardparams *p, struct pci_dev *pdev)
112 cinfo->capi_ctrl.load_firmware = b1_load_firmware; 112 cinfo->capi_ctrl.load_firmware = b1_load_firmware;
113 cinfo->capi_ctrl.reset_ctr = b1_reset_ctr; 113 cinfo->capi_ctrl.reset_ctr = b1_reset_ctr;
114 cinfo->capi_ctrl.procinfo = b1pci_procinfo; 114 cinfo->capi_ctrl.procinfo = b1pci_procinfo;
115 cinfo->capi_ctrl.ctr_read_proc = b1ctl_read_proc; 115 cinfo->capi_ctrl.proc_fops = &b1ctl_proc_fops;
116 strcpy(cinfo->capi_ctrl.name, card->name); 116 strcpy(cinfo->capi_ctrl.name, card->name);
117 cinfo->capi_ctrl.owner = THIS_MODULE; 117 cinfo->capi_ctrl.owner = THIS_MODULE;
118 118
@@ -251,7 +251,7 @@ static int b1pciv4_probe(struct capicardparams *p, struct pci_dev *pdev)
251 cinfo->capi_ctrl.load_firmware = b1dma_load_firmware; 251 cinfo->capi_ctrl.load_firmware = b1dma_load_firmware;
252 cinfo->capi_ctrl.reset_ctr = b1dma_reset_ctr; 252 cinfo->capi_ctrl.reset_ctr = b1dma_reset_ctr;
253 cinfo->capi_ctrl.procinfo = b1pciv4_procinfo; 253 cinfo->capi_ctrl.procinfo = b1pciv4_procinfo;
254 cinfo->capi_ctrl.ctr_read_proc = b1dmactl_read_proc; 254 cinfo->capi_ctrl.proc_fops = &b1dmactl_proc_fops;
255 strcpy(cinfo->capi_ctrl.name, card->name); 255 strcpy(cinfo->capi_ctrl.name, card->name);
256 256
257 retval = attach_capi_ctr(&cinfo->capi_ctrl); 257 retval = attach_capi_ctr(&cinfo->capi_ctrl);
diff --git a/drivers/isdn/hardware/avm/b1pcmcia.c b/drivers/isdn/hardware/avm/b1pcmcia.c
index 7740403b40e..d6391e0afee 100644
--- a/drivers/isdn/hardware/avm/b1pcmcia.c
+++ b/drivers/isdn/hardware/avm/b1pcmcia.c
@@ -108,7 +108,7 @@ static int b1pcmcia_add_card(unsigned int port, unsigned irq,
108 cinfo->capi_ctrl.load_firmware = b1_load_firmware; 108 cinfo->capi_ctrl.load_firmware = b1_load_firmware;
109 cinfo->capi_ctrl.reset_ctr = b1_reset_ctr; 109 cinfo->capi_ctrl.reset_ctr = b1_reset_ctr;
110 cinfo->capi_ctrl.procinfo = b1pcmcia_procinfo; 110 cinfo->capi_ctrl.procinfo = b1pcmcia_procinfo;
111 cinfo->capi_ctrl.ctr_read_proc = b1ctl_read_proc; 111 cinfo->capi_ctrl.proc_fops = &b1ctl_proc_fops;
112 strcpy(cinfo->capi_ctrl.name, card->name); 112 strcpy(cinfo->capi_ctrl.name, card->name);
113 113
114 retval = attach_capi_ctr(&cinfo->capi_ctrl); 114 retval = attach_capi_ctr(&cinfo->capi_ctrl);
diff --git a/drivers/isdn/hardware/avm/c4.c b/drivers/isdn/hardware/avm/c4.c
index 6833301a45f..de6e6b31181 100644
--- a/drivers/isdn/hardware/avm/c4.c
+++ b/drivers/isdn/hardware/avm/c4.c
@@ -11,6 +11,8 @@
11 11
12#include <linux/module.h> 12#include <linux/module.h>
13#include <linux/kernel.h> 13#include <linux/kernel.h>
14#include <linux/proc_fs.h>
15#include <linux/seq_file.h>
14#include <linux/skbuff.h> 16#include <linux/skbuff.h>
15#include <linux/delay.h> 17#include <linux/delay.h>
16#include <linux/mm.h> 18#include <linux/mm.h>
@@ -1062,19 +1064,18 @@ static char *c4_procinfo(struct capi_ctr *ctrl)
1062 return cinfo->infobuf; 1064 return cinfo->infobuf;
1063} 1065}
1064 1066
1065static int c4_read_proc(char *page, char **start, off_t off, 1067static int c4_proc_show(struct seq_file *m, void *v)
1066 int count, int *eof, struct capi_ctr *ctrl)
1067{ 1068{
1069 struct capi_ctr *ctrl = m->private;
1068 avmctrl_info *cinfo = (avmctrl_info *)(ctrl->driverdata); 1070 avmctrl_info *cinfo = (avmctrl_info *)(ctrl->driverdata);
1069 avmcard *card = cinfo->card; 1071 avmcard *card = cinfo->card;
1070 u8 flag; 1072 u8 flag;
1071 int len = 0;
1072 char *s; 1073 char *s;
1073 1074
1074 len += sprintf(page+len, "%-16s %s\n", "name", card->name); 1075 seq_printf(m, "%-16s %s\n", "name", card->name);
1075 len += sprintf(page+len, "%-16s 0x%x\n", "io", card->port); 1076 seq_printf(m, "%-16s 0x%x\n", "io", card->port);
1076 len += sprintf(page+len, "%-16s %d\n", "irq", card->irq); 1077 seq_printf(m, "%-16s %d\n", "irq", card->irq);
1077 len += sprintf(page+len, "%-16s 0x%lx\n", "membase", card->membase); 1078 seq_printf(m, "%-16s 0x%lx\n", "membase", card->membase);
1078 switch (card->cardtype) { 1079 switch (card->cardtype) {
1079 case avm_b1isa: s = "B1 ISA"; break; 1080 case avm_b1isa: s = "B1 ISA"; break;
1080 case avm_b1pci: s = "B1 PCI"; break; 1081 case avm_b1pci: s = "B1 PCI"; break;
@@ -1087,18 +1088,18 @@ static int c4_read_proc(char *page, char **start, off_t off,
1087 case avm_c2: s = "C2"; break; 1088 case avm_c2: s = "C2"; break;
1088 default: s = "???"; break; 1089 default: s = "???"; break;
1089 } 1090 }
1090 len += sprintf(page+len, "%-16s %s\n", "type", s); 1091 seq_printf(m, "%-16s %s\n", "type", s);
1091 if ((s = cinfo->version[VER_DRIVER]) != NULL) 1092 if ((s = cinfo->version[VER_DRIVER]) != NULL)
1092 len += sprintf(page+len, "%-16s %s\n", "ver_driver", s); 1093 seq_printf(m, "%-16s %s\n", "ver_driver", s);
1093 if ((s = cinfo->version[VER_CARDTYPE]) != NULL) 1094 if ((s = cinfo->version[VER_CARDTYPE]) != NULL)
1094 len += sprintf(page+len, "%-16s %s\n", "ver_cardtype", s); 1095 seq_printf(m, "%-16s %s\n", "ver_cardtype", s);
1095 if ((s = cinfo->version[VER_SERIAL]) != NULL) 1096 if ((s = cinfo->version[VER_SERIAL]) != NULL)
1096 len += sprintf(page+len, "%-16s %s\n", "ver_serial", s); 1097 seq_printf(m, "%-16s %s\n", "ver_serial", s);
1097 1098
1098 if (card->cardtype != avm_m1) { 1099 if (card->cardtype != avm_m1) {
1099 flag = ((u8 *)(ctrl->profile.manu))[3]; 1100 flag = ((u8 *)(ctrl->profile.manu))[3];
1100 if (flag) 1101 if (flag)
1101 len += sprintf(page+len, "%-16s%s%s%s%s%s%s%s\n", 1102 seq_printf(m, "%-16s%s%s%s%s%s%s%s\n",
1102 "protocol", 1103 "protocol",
1103 (flag & 0x01) ? " DSS1" : "", 1104 (flag & 0x01) ? " DSS1" : "",
1104 (flag & 0x02) ? " CT1" : "", 1105 (flag & 0x02) ? " CT1" : "",
@@ -1112,7 +1113,7 @@ static int c4_read_proc(char *page, char **start, off_t off,
1112 if (card->cardtype != avm_m1) { 1113 if (card->cardtype != avm_m1) {
1113 flag = ((u8 *)(ctrl->profile.manu))[5]; 1114 flag = ((u8 *)(ctrl->profile.manu))[5];
1114 if (flag) 1115 if (flag)
1115 len += sprintf(page+len, "%-16s%s%s%s%s\n", 1116 seq_printf(m, "%-16s%s%s%s%s\n",
1116 "linetype", 1117 "linetype",
1117 (flag & 0x01) ? " point to point" : "", 1118 (flag & 0x01) ? " point to point" : "",
1118 (flag & 0x02) ? " point to multipoint" : "", 1119 (flag & 0x02) ? " point to multipoint" : "",
@@ -1120,16 +1121,24 @@ static int c4_read_proc(char *page, char **start, off_t off,
1120 (flag & 0x04) ? " leased line with D-channel" : "" 1121 (flag & 0x04) ? " leased line with D-channel" : ""
1121 ); 1122 );
1122 } 1123 }
1123 len += sprintf(page+len, "%-16s %s\n", "cardname", cinfo->cardname); 1124 seq_printf(m, "%-16s %s\n", "cardname", cinfo->cardname);
1124 1125
1125 if (off+count >= len) 1126 return 0;
1126 *eof = 1;
1127 if (len < off)
1128 return 0;
1129 *start = page + off;
1130 return ((count < len-off) ? count : len-off);
1131} 1127}
1132 1128
1129static int c4_proc_open(struct inode *inode, struct file *file)
1130{
1131 return single_open(file, c4_proc_show, PDE(inode)->data);
1132}
1133
1134static const struct file_operations c4_proc_fops = {
1135 .owner = THIS_MODULE,
1136 .open = c4_proc_open,
1137 .read = seq_read,
1138 .llseek = seq_lseek,
1139 .release = single_release,
1140};
1141
1133/* ------------------------------------------------------------- */ 1142/* ------------------------------------------------------------- */
1134 1143
1135static int c4_add_card(struct capicardparams *p, struct pci_dev *dev, 1144static int c4_add_card(struct capicardparams *p, struct pci_dev *dev,
@@ -1201,7 +1210,7 @@ static int c4_add_card(struct capicardparams *p, struct pci_dev *dev,
1201 cinfo->capi_ctrl.load_firmware = c4_load_firmware; 1210 cinfo->capi_ctrl.load_firmware = c4_load_firmware;
1202 cinfo->capi_ctrl.reset_ctr = c4_reset_ctr; 1211 cinfo->capi_ctrl.reset_ctr = c4_reset_ctr;
1203 cinfo->capi_ctrl.procinfo = c4_procinfo; 1212 cinfo->capi_ctrl.procinfo = c4_procinfo;
1204 cinfo->capi_ctrl.ctr_read_proc = c4_read_proc; 1213 cinfo->capi_ctrl.proc_fops = &c4_proc_fops;
1205 strcpy(cinfo->capi_ctrl.name, card->name); 1214 strcpy(cinfo->capi_ctrl.name, card->name);
1206 1215
1207 retval = attach_capi_ctr(&cinfo->capi_ctrl); 1216 retval = attach_capi_ctr(&cinfo->capi_ctrl);
diff --git a/drivers/isdn/hardware/avm/t1isa.c b/drivers/isdn/hardware/avm/t1isa.c
index 1c53fd49adb..baeeb3c2a3e 100644
--- a/drivers/isdn/hardware/avm/t1isa.c
+++ b/drivers/isdn/hardware/avm/t1isa.c
@@ -429,7 +429,7 @@ static int t1isa_probe(struct pci_dev *pdev, int cardnr)
429 cinfo->capi_ctrl.load_firmware = t1isa_load_firmware; 429 cinfo->capi_ctrl.load_firmware = t1isa_load_firmware;
430 cinfo->capi_ctrl.reset_ctr = t1isa_reset_ctr; 430 cinfo->capi_ctrl.reset_ctr = t1isa_reset_ctr;
431 cinfo->capi_ctrl.procinfo = t1isa_procinfo; 431 cinfo->capi_ctrl.procinfo = t1isa_procinfo;
432 cinfo->capi_ctrl.ctr_read_proc = b1ctl_read_proc; 432 cinfo->capi_ctrl.proc_fops = &b1ctl_proc_fops;
433 strcpy(cinfo->capi_ctrl.name, card->name); 433 strcpy(cinfo->capi_ctrl.name, card->name);
434 434
435 retval = attach_capi_ctr(&cinfo->capi_ctrl); 435 retval = attach_capi_ctr(&cinfo->capi_ctrl);
diff --git a/drivers/isdn/hardware/avm/t1pci.c b/drivers/isdn/hardware/avm/t1pci.c
index e6d298d7514..5a3f8309801 100644
--- a/drivers/isdn/hardware/avm/t1pci.c
+++ b/drivers/isdn/hardware/avm/t1pci.c
@@ -119,7 +119,7 @@ static int t1pci_add_card(struct capicardparams *p, struct pci_dev *pdev)
119 cinfo->capi_ctrl.load_firmware = b1dma_load_firmware; 119 cinfo->capi_ctrl.load_firmware = b1dma_load_firmware;
120 cinfo->capi_ctrl.reset_ctr = b1dma_reset_ctr; 120 cinfo->capi_ctrl.reset_ctr = b1dma_reset_ctr;
121 cinfo->capi_ctrl.procinfo = t1pci_procinfo; 121 cinfo->capi_ctrl.procinfo = t1pci_procinfo;
122 cinfo->capi_ctrl.ctr_read_proc = b1dmactl_read_proc; 122 cinfo->capi_ctrl.proc_fops = &b1dmactl_proc_fops;
123 strcpy(cinfo->capi_ctrl.name, card->name); 123 strcpy(cinfo->capi_ctrl.name, card->name);
124 124
125 retval = attach_capi_ctr(&cinfo->capi_ctrl); 125 retval = attach_capi_ctr(&cinfo->capi_ctrl);
diff --git a/drivers/isdn/hardware/eicon/capimain.c b/drivers/isdn/hardware/eicon/capimain.c
index 98fcdfc7ca5..0f073cd7376 100644
--- a/drivers/isdn/hardware/eicon/capimain.c
+++ b/drivers/isdn/hardware/eicon/capimain.c
@@ -13,6 +13,7 @@
13#include <linux/module.h> 13#include <linux/module.h>
14#include <linux/init.h> 14#include <linux/init.h>
15#include <asm/uaccess.h> 15#include <asm/uaccess.h>
16#include <linux/seq_file.h>
16#include <linux/skbuff.h> 17#include <linux/skbuff.h>
17 18
18#include "os_capi.h" 19#include "os_capi.h"
@@ -75,25 +76,32 @@ void diva_os_free_message_buffer(diva_os_message_buffer_s * dmb)
75/* 76/*
76 * proc function for controller info 77 * proc function for controller info
77 */ 78 */
78static int diva_ctl_read_proc(char *page, char **start, off_t off, 79static int diva_ctl_proc_show(struct seq_file *m, void *v)
79 int count, int *eof, struct capi_ctr *ctrl)
80{ 80{
81 struct capi_ctr *ctrl = m->private;
81 diva_card *card = (diva_card *) ctrl->driverdata; 82 diva_card *card = (diva_card *) ctrl->driverdata;
82 int len = 0; 83
83 84 seq_printf(m, "%s\n", ctrl->name);
84 len += sprintf(page + len, "%s\n", ctrl->name); 85 seq_printf(m, "Serial No. : %s\n", ctrl->serial);
85 len += sprintf(page + len, "Serial No. : %s\n", ctrl->serial); 86 seq_printf(m, "Id : %d\n", card->Id);
86 len += sprintf(page + len, "Id : %d\n", card->Id); 87 seq_printf(m, "Channels : %d\n", card->d.channels);
87 len += sprintf(page + len, "Channels : %d\n", card->d.channels); 88
88 89 return 0;
89 if (off + count >= len) 90}
90 *eof = 1; 91
91 if (len < off) 92static int diva_ctl_proc_open(struct inode *inode, struct file *file)
92 return 0; 93{
93 *start = page + off; 94 return single_open(file, diva_ctl_proc_show, NULL);
94 return ((count < len - off) ? count : len - off);
95} 95}
96 96
97static const struct file_operations diva_ctl_proc_fops = {
98 .owner = THIS_MODULE,
99 .open = diva_ctl_proc_open,
100 .read = seq_read,
101 .llseek = seq_lseek,
102 .release = single_release,
103};
104
97/* 105/*
98 * set additional os settings in capi_ctr struct 106 * set additional os settings in capi_ctr struct
99 */ 107 */
@@ -102,7 +110,7 @@ void diva_os_set_controller_struct(struct capi_ctr *ctrl)
102 ctrl->driver_name = DRIVERLNAME; 110 ctrl->driver_name = DRIVERLNAME;
103 ctrl->load_firmware = NULL; 111 ctrl->load_firmware = NULL;
104 ctrl->reset_ctr = NULL; 112 ctrl->reset_ctr = NULL;
105 ctrl->ctr_read_proc = diva_ctl_read_proc; 113 ctrl->proc_fops = &diva_ctl_proc_fops;
106 ctrl->owner = THIS_MODULE; 114 ctrl->owner = THIS_MODULE;
107} 115}
108 116
diff --git a/drivers/isdn/hardware/eicon/diva_didd.c b/drivers/isdn/hardware/eicon/diva_didd.c
index 993b14cf177..5d06a743782 100644
--- a/drivers/isdn/hardware/eicon/diva_didd.c
+++ b/drivers/isdn/hardware/eicon/diva_didd.c
@@ -15,6 +15,7 @@
15#include <linux/init.h> 15#include <linux/init.h>
16#include <linux/kernel.h> 16#include <linux/kernel.h>
17#include <linux/proc_fs.h> 17#include <linux/proc_fs.h>
18#include <linux/seq_file.h>
18#include <net/net_namespace.h> 19#include <net/net_namespace.h>
19 20
20#include "platform.h" 21#include "platform.h"
@@ -62,39 +63,41 @@ static char *getrev(const char *revision)
62 return rev; 63 return rev;
63} 64}
64 65
65static int 66static int divadidd_proc_show(struct seq_file *m, void *v)
66proc_read(char *page, char **start, off_t off, int count, int *eof,
67 void *data)
68{ 67{
69 int len = 0;
70 char tmprev[32]; 68 char tmprev[32];
71 69
72 strcpy(tmprev, main_revision); 70 strcpy(tmprev, main_revision);
73 len += sprintf(page + len, "%s\n", DRIVERNAME); 71 seq_printf(m, "%s\n", DRIVERNAME);
74 len += sprintf(page + len, "name : %s\n", DRIVERLNAME); 72 seq_printf(m, "name : %s\n", DRIVERLNAME);
75 len += sprintf(page + len, "release : %s\n", DRIVERRELEASE_DIDD); 73 seq_printf(m, "release : %s\n", DRIVERRELEASE_DIDD);
76 len += sprintf(page + len, "build : %s(%s)\n", 74 seq_printf(m, "build : %s(%s)\n",
77 diva_didd_common_code_build, DIVA_BUILD); 75 diva_didd_common_code_build, DIVA_BUILD);
78 len += sprintf(page + len, "revision : %s\n", getrev(tmprev)); 76 seq_printf(m, "revision : %s\n", getrev(tmprev));
79 77
80 if (off + count >= len) 78 return 0;
81 *eof = 1;
82 if (len < off)
83 return 0;
84 *start = page + off;
85 return ((count < len - off) ? count : len - off);
86} 79}
87 80
81static int divadidd_proc_open(struct inode *inode, struct file *file)
82{
83 return single_open(file, divadidd_proc_show, NULL);
84}
85
86static const struct file_operations divadidd_proc_fops = {
87 .owner = THIS_MODULE,
88 .open = divadidd_proc_open,
89 .read = seq_read,
90 .llseek = seq_lseek,
91 .release = single_release,
92};
93
88static int DIVA_INIT_FUNCTION create_proc(void) 94static int DIVA_INIT_FUNCTION create_proc(void)
89{ 95{
90 proc_net_eicon = proc_mkdir("eicon", init_net.proc_net); 96 proc_net_eicon = proc_mkdir("eicon", init_net.proc_net);
91 97
92 if (proc_net_eicon) { 98 if (proc_net_eicon) {
93 if ((proc_didd = 99 proc_didd = proc_create(DRIVERLNAME, S_IRUGO, proc_net_eicon,
94 create_proc_entry(DRIVERLNAME, S_IFREG | S_IRUGO, 100 &divadidd_proc_fops);
95 proc_net_eicon))) {
96 proc_didd->read_proc = proc_read;
97 }
98 return (1); 101 return (1);
99 } 102 }
100 return (0); 103 return (0);
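The diva_didd change keeps its entry under the driver's own directory below /proc/net: proc_mkdir() creates the directory and proc_create() registers the seq_file-backed entry inside it. A small sketch of that arrangement; demo_dir and demo_dir_init are hypothetical, and the fops argument is assumed to be a seq_file fops like the ones defined earlier in this series.

#include <linux/errno.h>
#include <linux/proc_fs.h>
#include <linux/stat.h>
#include <net/net_namespace.h>	/* init_net */

static struct proc_dir_entry *demo_dir;

static int demo_dir_init(const struct file_operations *fops)
{
	demo_dir = proc_mkdir("demo", init_net.proc_net);
	if (!demo_dir)
		return -ENOMEM;
	/* S_IRUGO: world-readable, matching the eicon entry above */
	proc_create("status", S_IRUGO, demo_dir, fops);
	return 0;
}
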
diff --git a/drivers/isdn/hardware/eicon/divasi.c b/drivers/isdn/hardware/eicon/divasi.c
index 69e71ebe784..f577719ab3f 100644
--- a/drivers/isdn/hardware/eicon/divasi.c
+++ b/drivers/isdn/hardware/eicon/divasi.c
@@ -17,6 +17,7 @@
17#include <linux/poll.h> 17#include <linux/poll.h>
18#include <linux/proc_fs.h> 18#include <linux/proc_fs.h>
19#include <linux/skbuff.h> 19#include <linux/skbuff.h>
20#include <linux/seq_file.h>
20#include <linux/smp_lock.h> 21#include <linux/smp_lock.h>
21#include <asm/uaccess.h> 22#include <asm/uaccess.h>
22 23
@@ -86,39 +87,40 @@ static void diva_um_timer_function(unsigned long data);
86extern struct proc_dir_entry *proc_net_eicon; 87extern struct proc_dir_entry *proc_net_eicon;
87static struct proc_dir_entry *um_idi_proc_entry = NULL; 88static struct proc_dir_entry *um_idi_proc_entry = NULL;
88 89
89static int 90static int um_idi_proc_show(struct seq_file *m, void *v)
90um_idi_proc_read(char *page, char **start, off_t off, int count, int *eof,
91 void *data)
92{ 91{
93 int len = 0;
94 char tmprev[32]; 92 char tmprev[32];
95 93
96 len += sprintf(page + len, "%s\n", DRIVERNAME); 94 seq_printf(m, "%s\n", DRIVERNAME);
97 len += sprintf(page + len, "name : %s\n", DRIVERLNAME); 95 seq_printf(m, "name : %s\n", DRIVERLNAME);
98 len += sprintf(page + len, "release : %s\n", DRIVERRELEASE_IDI); 96 seq_printf(m, "release : %s\n", DRIVERRELEASE_IDI);
99 strcpy(tmprev, main_revision); 97 strcpy(tmprev, main_revision);
100 len += sprintf(page + len, "revision : %s\n", getrev(tmprev)); 98 seq_printf(m, "revision : %s\n", getrev(tmprev));
101 len += sprintf(page + len, "build : %s\n", DIVA_BUILD); 99 seq_printf(m, "build : %s\n", DIVA_BUILD);
102 len += sprintf(page + len, "major : %d\n", major); 100 seq_printf(m, "major : %d\n", major);
103 101
104 if (off + count >= len) 102 return 0;
105 *eof = 1; 103}
106 if (len < off) 104
107 return 0; 105static int um_idi_proc_open(struct inode *inode, struct file *file)
108 *start = page + off; 106{
109 return ((count < len - off) ? count : len - off); 107 return single_open(file, um_idi_proc_show, NULL);
110} 108}
111 109
110static const struct file_operations um_idi_proc_fops = {
111 .owner = THIS_MODULE,
112 .open = um_idi_proc_open,
113 .read = seq_read,
114 .llseek = seq_lseek,
115 .release = single_release,
116};
117
112static int DIVA_INIT_FUNCTION create_um_idi_proc(void) 118static int DIVA_INIT_FUNCTION create_um_idi_proc(void)
113{ 119{
114 um_idi_proc_entry = create_proc_entry(DRIVERLNAME, 120 um_idi_proc_entry = proc_create(DRIVERLNAME, S_IRUGO, proc_net_eicon,
115 S_IFREG | S_IRUGO | S_IWUSR, 121 &um_idi_proc_fops);
116 proc_net_eicon);
117 if (!um_idi_proc_entry) 122 if (!um_idi_proc_entry)
118 return (0); 123 return (0);
119
120 um_idi_proc_entry->read_proc = um_idi_proc_read;
121
122 return (1); 124 return (1);
123} 125}
124 126
diff --git a/drivers/isdn/hardware/eicon/divasproc.c b/drivers/isdn/hardware/eicon/divasproc.c
index 040827288ec..46d44a94262 100644
--- a/drivers/isdn/hardware/eicon/divasproc.c
+++ b/drivers/isdn/hardware/eicon/divasproc.c
@@ -14,6 +14,7 @@
14#include <linux/kernel.h> 14#include <linux/kernel.h>
15#include <linux/poll.h> 15#include <linux/poll.h>
16#include <linux/proc_fs.h> 16#include <linux/proc_fs.h>
17#include <linux/seq_file.h>
17#include <linux/list.h> 18#include <linux/list.h>
18#include <asm/uaccess.h> 19#include <asm/uaccess.h>
19 20
@@ -141,14 +142,10 @@ void remove_divas_proc(void)
141 } 142 }
142} 143}
143 144
144/* 145static ssize_t grp_opt_proc_write(struct file *file, const char __user *buffer,
145** write group_optimization 146 size_t count, loff_t *pos)
146*/
147static int
148write_grp_opt(struct file *file, const char __user *buffer, unsigned long count,
149 void *data)
150{ 147{
151 diva_os_xdi_adapter_t *a = (diva_os_xdi_adapter_t *) data; 148 diva_os_xdi_adapter_t *a = PDE(file->f_path.dentry->d_inode)->data;
152 PISDN_ADAPTER IoAdapter = IoAdapters[a->controller - 1]; 149 PISDN_ADAPTER IoAdapter = IoAdapters[a->controller - 1];
153 150
154 if ((count == 1) || (count == 2)) { 151 if ((count == 1) || (count == 2)) {
@@ -172,14 +169,10 @@ write_grp_opt(struct file *file, const char __user *buffer, unsigned long count,
172 return (-EINVAL); 169 return (-EINVAL);
173} 170}
174 171
175/* 172static ssize_t d_l1_down_proc_write(struct file *file, const char __user *buffer,
176** write dynamic_l1_down 173 size_t count, loff_t *pos)
177*/
178static int
179write_d_l1_down(struct file *file, const char __user *buffer, unsigned long count,
180 void *data)
181{ 174{
182 diva_os_xdi_adapter_t *a = (diva_os_xdi_adapter_t *) data; 175 diva_os_xdi_adapter_t *a = PDE(file->f_path.dentry->d_inode)->data;
183 PISDN_ADAPTER IoAdapter = IoAdapters[a->controller - 1]; 176 PISDN_ADAPTER IoAdapter = IoAdapters[a->controller - 1];
184 177
185 if ((count == 1) || (count == 2)) { 178 if ((count == 1) || (count == 2)) {
@@ -203,63 +196,62 @@ write_d_l1_down(struct file *file, const char __user *buffer, unsigned long coun
203 return (-EINVAL); 196 return (-EINVAL);
204} 197}
205 198
206 199static int d_l1_down_proc_show(struct seq_file *m, void *v)
207/*
208** read dynamic_l1_down
209*/
210static int
211read_d_l1_down(char *page, char **start, off_t off, int count, int *eof,
212 void *data)
213{ 200{
214 int len = 0; 201 diva_os_xdi_adapter_t *a = m->private;
215 diva_os_xdi_adapter_t *a = (diva_os_xdi_adapter_t *) data;
216 PISDN_ADAPTER IoAdapter = IoAdapters[a->controller - 1]; 202 PISDN_ADAPTER IoAdapter = IoAdapters[a->controller - 1];
217 203
218 len += sprintf(page + len, "%s\n", 204 seq_printf(m, "%s\n",
219 (IoAdapter->capi_cfg. 205 (IoAdapter->capi_cfg.
220 cfg_1 & DIVA_XDI_CAPI_CFG_1_DYNAMIC_L1_ON) ? "1" : 206 cfg_1 & DIVA_XDI_CAPI_CFG_1_DYNAMIC_L1_ON) ? "1" :
221 "0"); 207 "0");
208 return 0;
209}
222 210
223 if (off + count >= len) 211static int d_l1_down_proc_open(struct inode *inode, struct file *file)
224 *eof = 1; 212{
225 if (len < off) 213 return single_open(file, d_l1_down_proc_show, PDE(inode)->data);
226 return 0;
227 *start = page + off;
228 return ((count < len - off) ? count : len - off);
229} 214}
230 215
231/* 216static const struct file_operations d_l1_down_proc_fops = {
232** read group_optimization 217 .owner = THIS_MODULE,
233*/ 218 .open = d_l1_down_proc_open,
234static int 219 .read = seq_read,
235read_grp_opt(char *page, char **start, off_t off, int count, int *eof, 220 .llseek = seq_lseek,
236 void *data) 221 .release = single_release,
222 .write = d_l1_down_proc_write,
223};
224
225static int grp_opt_proc_show(struct seq_file *m, void *v)
237{ 226{
238 int len = 0; 227 diva_os_xdi_adapter_t *a = m->private;
239 diva_os_xdi_adapter_t *a = (diva_os_xdi_adapter_t *) data;
240 PISDN_ADAPTER IoAdapter = IoAdapters[a->controller - 1]; 228 PISDN_ADAPTER IoAdapter = IoAdapters[a->controller - 1];
241 229
242 len += sprintf(page + len, "%s\n", 230 seq_printf(m, "%s\n",
243 (IoAdapter->capi_cfg. 231 (IoAdapter->capi_cfg.
244 cfg_1 & DIVA_XDI_CAPI_CFG_1_GROUP_POPTIMIZATION_ON) 232 cfg_1 & DIVA_XDI_CAPI_CFG_1_GROUP_POPTIMIZATION_ON)
245 ? "1" : "0"); 233 ? "1" : "0");
234 return 0;
235}
246 236
247 if (off + count >= len) 237static int grp_opt_proc_open(struct inode *inode, struct file *file)
248 *eof = 1; 238{
249 if (len < off) 239 return single_open(file, grp_opt_proc_show, PDE(inode)->data);
250 return 0;
251 *start = page + off;
252 return ((count < len - off) ? count : len - off);
253} 240}
254 241
255/* 242static const struct file_operations grp_opt_proc_fops = {
256** info write 243 .owner = THIS_MODULE,
257*/ 244 .open = grp_opt_proc_open,
258static int 245 .read = seq_read,
259info_write(struct file *file, const char __user *buffer, unsigned long count, 246 .llseek = seq_lseek,
260 void *data) 247 .release = single_release,
248 .write = grp_opt_proc_write,
249};
250
251static ssize_t info_proc_write(struct file *file, const char __user *buffer,
252 size_t count, loff_t *pos)
261{ 253{
262 diva_os_xdi_adapter_t *a = (diva_os_xdi_adapter_t *) data; 254 diva_os_xdi_adapter_t *a = PDE(file->f_path.dentry->d_inode)->data;
263 PISDN_ADAPTER IoAdapter = IoAdapters[a->controller - 1]; 255 PISDN_ADAPTER IoAdapter = IoAdapters[a->controller - 1];
264 char c[4]; 256 char c[4];
265 257
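The divasproc hunks above also move the old write_proc handlers to ordinary file_operations .write methods; their per-adapter data now comes from PDE() on the file's inode instead of a void *data argument. A hedged sketch of such a write method, with demo_proc_write and demo_flag as hypothetical names and the same kernel vintage assumed:

#include <linux/fs.h>
#include <linux/proc_fs.h>
#include <linux/uaccess.h>

static int demo_flag;

static ssize_t demo_proc_write(struct file *file, const char __user *buffer,
			       size_t count, loff_t *pos)
{
	char c;

	/* per-entry data, if needed: PDE(file->f_path.dentry->d_inode)->data */
	if (count < 1)
		return -EINVAL;
	if (copy_from_user(&c, buffer, 1))
		return -EFAULT;
	demo_flag = (c != '0');
	return count;	/* consume everything that was written */
}
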
@@ -277,63 +269,46 @@ info_write(struct file *file, const char __user *buffer, unsigned long count,
277 return (-EINVAL); 269 return (-EINVAL);
278} 270}
279 271
280/* 272static int info_proc_show(struct seq_file *m, void *v)
281** info read
282*/
283static int
284info_read(char *page, char **start, off_t off, int count, int *eof,
285 void *data)
286{ 273{
287 int i = 0; 274 int i = 0;
288 int len = 0;
289 char *p; 275 char *p;
290 char tmpser[16]; 276 char tmpser[16];
291 diva_os_xdi_adapter_t *a = (diva_os_xdi_adapter_t *) data; 277 diva_os_xdi_adapter_t *a = m->private;
292 PISDN_ADAPTER IoAdapter = IoAdapters[a->controller - 1]; 278 PISDN_ADAPTER IoAdapter = IoAdapters[a->controller - 1];
293 279
294 len += 280 seq_printf(m, "Name : %s\n", IoAdapter->Properties.Name);
295 sprintf(page + len, "Name : %s\n", 281 seq_printf(m, "DSP state : %08x\n", a->dsp_mask);
296 IoAdapter->Properties.Name); 282 seq_printf(m, "Channels : %02d\n", IoAdapter->Properties.Channels);
297 len += sprintf(page + len, "DSP state : %08x\n", a->dsp_mask); 283 seq_printf(m, "E. max/used : %03d/%03d\n",
298 len += sprintf(page + len, "Channels : %02d\n",
299 IoAdapter->Properties.Channels);
300 len += sprintf(page + len, "E. max/used : %03d/%03d\n",
301 IoAdapter->e_max, IoAdapter->e_count); 284 IoAdapter->e_max, IoAdapter->e_count);
302 diva_get_vserial_number(IoAdapter, tmpser); 285 diva_get_vserial_number(IoAdapter, tmpser);
303 len += sprintf(page + len, "Serial : %s\n", tmpser); 286 seq_printf(m, "Serial : %s\n", tmpser);
304 len += 287 seq_printf(m, "IRQ : %d\n", IoAdapter->irq_info.irq_nr);
305 sprintf(page + len, "IRQ : %d\n", 288 seq_printf(m, "CardIndex : %d\n", a->CardIndex);
306 IoAdapter->irq_info.irq_nr); 289 seq_printf(m, "CardOrdinal : %d\n", a->CardOrdinal);
307 len += sprintf(page + len, "CardIndex : %d\n", a->CardIndex); 290 seq_printf(m, "Controller : %d\n", a->controller);
308 len += sprintf(page + len, "CardOrdinal : %d\n", a->CardOrdinal); 291 seq_printf(m, "Bus-Type : %s\n",
309 len += sprintf(page + len, "Controller : %d\n", a->controller);
310 len += sprintf(page + len, "Bus-Type : %s\n",
311 (a->Bus == 292 (a->Bus ==
312 DIVAS_XDI_ADAPTER_BUS_ISA) ? "ISA" : "PCI"); 293 DIVAS_XDI_ADAPTER_BUS_ISA) ? "ISA" : "PCI");
313 len += sprintf(page + len, "Port-Name : %s\n", a->port_name); 294 seq_printf(m, "Port-Name : %s\n", a->port_name);
314 if (a->Bus == DIVAS_XDI_ADAPTER_BUS_PCI) { 295 if (a->Bus == DIVAS_XDI_ADAPTER_BUS_PCI) {
315 len += 296 seq_printf(m, "PCI-bus : %d\n", a->resources.pci.bus);
316 sprintf(page + len, "PCI-bus : %d\n", 297 seq_printf(m, "PCI-func : %d\n", a->resources.pci.func);
317 a->resources.pci.bus);
318 len +=
319 sprintf(page + len, "PCI-func : %d\n",
320 a->resources.pci.func);
321 for (i = 0; i < 8; i++) { 298 for (i = 0; i < 8; i++) {
322 if (a->resources.pci.bar[i]) { 299 if (a->resources.pci.bar[i]) {
323 len += 300 seq_printf(m,
324 sprintf(page + len,
325 "Mem / I/O %d : 0x%x / mapped : 0x%lx", 301 "Mem / I/O %d : 0x%x / mapped : 0x%lx",
326 i, a->resources.pci.bar[i], 302 i, a->resources.pci.bar[i],
327 (unsigned long) a->resources. 303 (unsigned long) a->resources.
328 pci.addr[i]); 304 pci.addr[i]);
329 if (a->resources.pci.length[i]) { 305 if (a->resources.pci.length[i]) {
330 len += 306 seq_printf(m,
331 sprintf(page + len,
332 " / length : %d", 307 " / length : %d",
333 a->resources.pci. 308 a->resources.pci.
334 length[i]); 309 length[i]);
335 } 310 }
336 len += sprintf(page + len, "\n"); 311 seq_putc(m, '\n');
337 } 312 }
338 } 313 }
339 } 314 }
@@ -353,16 +328,25 @@ info_read(char *page, char **start, off_t off, int count, int *eof,
353 } else { 328 } else {
354 p = "ready"; 329 p = "ready";
355 } 330 }
356 len += sprintf(page + len, "State : %s\n", p); 331 seq_printf(m, "State : %s\n", p);
357 332
358 if (off + count >= len) 333 return 0;
359 *eof = 1; 334}
360 if (len < off) 335
361 return 0; 336static int info_proc_open(struct inode *inode, struct file *file)
362 *start = page + off; 337{
363 return ((count < len - off) ? count : len - off); 338 return single_open(file, info_proc_show, PDE(inode)->data);
364} 339}
365 340
341static const struct file_operations info_proc_fops = {
342 .owner = THIS_MODULE,
343 .open = info_proc_open,
344 .read = seq_read,
345 .llseek = seq_lseek,
346 .release = single_release,
347 .write = info_proc_write,
348};
349
366/* 350/*
367** adapter proc init/de-init 351** adapter proc init/de-init
368*/ 352*/
@@ -380,28 +364,20 @@ int create_adapter_proc(diva_os_xdi_adapter_t * a)
380 return (0); 364 return (0);
381 a->proc_adapter_dir = (void *) de; 365 a->proc_adapter_dir = (void *) de;
382 366
383 if (!(pe = 367 pe = proc_create_data(info_proc_name, S_IRUGO | S_IWUSR, de,
384 create_proc_entry(info_proc_name, S_IFREG | S_IRUGO | S_IWUSR, de))) 368 &info_proc_fops, a);
369 if (!pe)
385 return (0); 370 return (0);
386 a->proc_info = (void *) pe; 371 a->proc_info = (void *) pe;
387 pe->write_proc = info_write;
388 pe->read_proc = info_read;
389 pe->data = a;
390 372
391 if ((pe = create_proc_entry(grp_opt_proc_name, 373 pe = proc_create_data(grp_opt_proc_name, S_IRUGO | S_IWUSR, de,
392 S_IFREG | S_IRUGO | S_IWUSR, de))) { 374 &grp_opt_proc_fops, a);
375 if (pe)
393 a->proc_grp_opt = (void *) pe; 376 a->proc_grp_opt = (void *) pe;
394 pe->write_proc = write_grp_opt; 377 pe = proc_create_data(d_l1_down_proc_name, S_IRUGO | S_IWUSR, de,
395 pe->read_proc = read_grp_opt; 378 &d_l1_down_proc_fops, a);
396 pe->data = a; 379 if (pe)
397 }
398 if ((pe = create_proc_entry(d_l1_down_proc_name,
399 S_IFREG | S_IRUGO | S_IWUSR, de))) {
400 a->proc_d_l1_down = (void *) pe; 380 a->proc_d_l1_down = (void *) pe;
401 pe->write_proc = write_d_l1_down;
402 pe->read_proc = read_d_l1_down;
403 pe->data = a;
404 }
405 381
406 DBG_TRC(("proc entry %s created", tmp)); 382 DBG_TRC(("proc entry %s created", tmp));
407 383
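
The divasproc.c hunks above follow the standard read_proc-to-seq_file migration: the old page/off/count callback becomes a *_show() routine that writes into a struct seq_file, single_open() binds that routine to the opened file, and proc_create_data() registers the file_operations together with the per-adapter pointer that later appears as m->private. A minimal sketch of the pattern, using hypothetical names (example_*, struct my_dev) rather than the driver's own:

#include <linux/module.h>
#include <linux/fs.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>

struct my_dev {
	int value;
};

/* emit the whole file in one go; seq_read()/seq_lseek() handle offsets */
static int example_proc_show(struct seq_file *m, void *v)
{
	struct my_dev *dev = m->private;	/* third argument of single_open() */

	seq_printf(m, "value : %d\n", dev->value);
	return 0;
}

static int example_proc_open(struct inode *inode, struct file *file)
{
	/* PDE(inode)->data is what proc_create_data() stored for this entry */
	return single_open(file, example_proc_show, PDE(inode)->data);
}

static const struct file_operations example_proc_fops = {
	.owner		= THIS_MODULE,
	.open		= example_proc_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

/* replaces create_proc_entry() plus the read_proc/write_proc/data assignments */
static int example_proc_register(struct my_dev *dev, struct proc_dir_entry *dir)
{
	if (!proc_create_data("example", S_IRUGO, dir, &example_proc_fops, dev))
		return -ENOMEM;
	return 0;
}

For write support the entries keep a plain .write handler, as grp_opt_proc_fops and info_proc_fops do above, which fetches the same private pointer through PDE(file->f_path.dentry->d_inode)->data.
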
diff --git a/drivers/isdn/hysdn/hycapi.c b/drivers/isdn/hysdn/hycapi.c
index 4ffaa14b9fc..fe874afa4f8 100644
--- a/drivers/isdn/hysdn/hycapi.c
+++ b/drivers/isdn/hysdn/hycapi.c
@@ -11,6 +11,8 @@
11 */ 11 */
12 12
13#include <linux/module.h> 13#include <linux/module.h>
14#include <linux/proc_fs.h>
15#include <linux/seq_file.h>
14#include <linux/signal.h> 16#include <linux/signal.h>
15#include <linux/kernel.h> 17#include <linux/kernel.h>
16#include <linux/skbuff.h> 18#include <linux/skbuff.h>
@@ -432,26 +434,16 @@ static u16 hycapi_send_message(struct capi_ctr *ctrl, struct sk_buff *skb)
432 return retval; 434 return retval;
433} 435}
434 436
435/********************************************************************* 437static int hycapi_proc_show(struct seq_file *m, void *v)
436hycapi_read_proc
437
438Informations provided in the /proc/capi-entries.
439
440*********************************************************************/
441
442static int hycapi_read_proc(char *page, char **start, off_t off,
443 int count, int *eof, struct capi_ctr *ctrl)
444{ 438{
439 struct capi_ctr *ctrl = m->private;
445 hycapictrl_info *cinfo = (hycapictrl_info *)(ctrl->driverdata); 440 hycapictrl_info *cinfo = (hycapictrl_info *)(ctrl->driverdata);
446 hysdn_card *card = cinfo->card; 441 hysdn_card *card = cinfo->card;
447 int len = 0;
448 char *s; 442 char *s;
449#ifdef HYCAPI_PRINTFNAMES 443
450 printk(KERN_NOTICE "hycapi_read_proc\n"); 444 seq_printf(m, "%-16s %s\n", "name", cinfo->cardname);
451#endif 445 seq_printf(m, "%-16s 0x%x\n", "io", card->iobase);
452 len += sprintf(page+len, "%-16s %s\n", "name", cinfo->cardname); 446 seq_printf(m, "%-16s %d\n", "irq", card->irq);
453 len += sprintf(page+len, "%-16s 0x%x\n", "io", card->iobase);
454 len += sprintf(page+len, "%-16s %d\n", "irq", card->irq);
455 447
456 switch (card->brdtype) { 448 switch (card->brdtype) {
457 case BD_PCCARD: s = "HYSDN Hycard"; break; 449 case BD_PCCARD: s = "HYSDN Hycard"; break;
@@ -461,24 +453,32 @@ static int hycapi_read_proc(char *page, char **start, off_t off,
461 case BD_PLEXUS: s = "HYSDN Plexus30"; break; 453 case BD_PLEXUS: s = "HYSDN Plexus30"; break;
462 default: s = "???"; break; 454 default: s = "???"; break;
463 } 455 }
464 len += sprintf(page+len, "%-16s %s\n", "type", s); 456 seq_printf(m, "%-16s %s\n", "type", s);
465 if ((s = cinfo->version[VER_DRIVER]) != NULL) 457 if ((s = cinfo->version[VER_DRIVER]) != NULL)
466 len += sprintf(page+len, "%-16s %s\n", "ver_driver", s); 458 seq_printf(m, "%-16s %s\n", "ver_driver", s);
467 if ((s = cinfo->version[VER_CARDTYPE]) != NULL) 459 if ((s = cinfo->version[VER_CARDTYPE]) != NULL)
468 len += sprintf(page+len, "%-16s %s\n", "ver_cardtype", s); 460 seq_printf(m, "%-16s %s\n", "ver_cardtype", s);
469 if ((s = cinfo->version[VER_SERIAL]) != NULL) 461 if ((s = cinfo->version[VER_SERIAL]) != NULL)
470 len += sprintf(page+len, "%-16s %s\n", "ver_serial", s); 462 seq_printf(m, "%-16s %s\n", "ver_serial", s);
471 463
472 len += sprintf(page+len, "%-16s %s\n", "cardname", cinfo->cardname); 464 seq_printf(m, "%-16s %s\n", "cardname", cinfo->cardname);
473 465
474 if (off+count >= len) 466 return 0;
475 *eof = 1; 467}
476 if (len < off) 468
477 return 0; 469static int hycapi_proc_open(struct inode *inode, struct file *file)
478 *start = page + off; 470{
479 return ((count < len-off) ? count : len-off); 471 return single_open(file, hycapi_proc_show, PDE(inode)->data);
480} 472}
481 473
474static const struct file_operations hycapi_proc_fops = {
475 .owner = THIS_MODULE,
476 .open = hycapi_proc_open,
477 .read = seq_read,
478 .llseek = seq_lseek,
479 .release = single_release,
480};
481
482/************************************************************** 482/**************************************************************
483hycapi_load_firmware 483hycapi_load_firmware
484 484
@@ -774,7 +774,7 @@ hycapi_capi_create(hysdn_card *card)
774 ctrl->load_firmware = hycapi_load_firmware; 774 ctrl->load_firmware = hycapi_load_firmware;
775 ctrl->reset_ctr = hycapi_reset_ctr; 775 ctrl->reset_ctr = hycapi_reset_ctr;
776 ctrl->procinfo = hycapi_procinfo; 776 ctrl->procinfo = hycapi_procinfo;
777 ctrl->ctr_read_proc = hycapi_read_proc; 777 ctrl->proc_fops = &hycapi_proc_fops;
778 strcpy(ctrl->name, cinfo->cardname); 778 strcpy(ctrl->name, cinfo->cardname);
779 ctrl->owner = THIS_MODULE; 779 ctrl->owner = THIS_MODULE;
780 780
diff --git a/drivers/media/dvb/dvb-core/dvb_net.c b/drivers/media/dvb/dvb-core/dvb_net.c
index 8b8558fcb04..da6552d32cf 100644
--- a/drivers/media/dvb/dvb-core/dvb_net.c
+++ b/drivers/media/dvb/dvb-core/dvb_net.c
@@ -949,11 +949,8 @@ static int dvb_net_filter_sec_set(struct net_device *dev,
949 (*secfilter)->filter_mask[10] = mac_mask[1]; 949 (*secfilter)->filter_mask[10] = mac_mask[1];
950 (*secfilter)->filter_mask[11]=mac_mask[0]; 950 (*secfilter)->filter_mask[11]=mac_mask[0];
951 951
952 dprintk("%s: filter mac=%02x %02x %02x %02x %02x %02x\n", 952 dprintk("%s: filter mac=%pM\n", dev->name, mac);
953 dev->name, mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]); 953 dprintk("%s: filter mask=%pM\n", dev->name, mac_mask);
954 dprintk("%s: filter mask=%02x %02x %02x %02x %02x %02x\n",
955 dev->name, mac_mask[0], mac_mask[1], mac_mask[2],
956 mac_mask[3], mac_mask[4], mac_mask[5]);
957 954
958 return 0; 955 return 0;
959} 956}
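
The removed %02x chains here, and in the i2o_proc hunk that follows, are what the %pM printk extension spells out automatically: it consumes a single pointer to six bytes and prints them colon separated. A minimal sketch (hypothetical function; any six-byte buffer works, including a pointer into the middle of a larger array such as &serialno[2] below):

#include <linux/kernel.h>
#include <linux/types.h>

static void example_print_mac(const u8 *mac)
{
	/* %pM prints "xx:xx:xx:xx:xx:xx" from the six bytes at mac */
	pr_info("station mac=%pM\n", mac);
}
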
diff --git a/drivers/message/i2o/i2o_proc.c b/drivers/message/i2o/i2o_proc.c
index 7045c45da9b..949a648f8e2 100644
--- a/drivers/message/i2o/i2o_proc.c
+++ b/drivers/message/i2o/i2o_proc.c
@@ -111,10 +111,7 @@ static int print_serial_number(struct seq_file *seq, u8 * serialno, int max_len)
111 break; 111 break;
112 112
113 case I2O_SNFORMAT_LAN48_MAC: /* LAN-48 MAC Address */ 113 case I2O_SNFORMAT_LAN48_MAC: /* LAN-48 MAC Address */
114 seq_printf(seq, 114 seq_printf(seq, "LAN-48 MAC address @ %pM", &serialno[2]);
115 "LAN-48 MAC address @ %02X:%02X:%02X:%02X:%02X:%02X",
116 serialno[2], serialno[3],
117 serialno[4], serialno[5], serialno[6], serialno[7]);
118 break; 115 break;
119 116
120 case I2O_SNFORMAT_WAN: /* WAN MAC Address */ 117 case I2O_SNFORMAT_WAN: /* WAN MAC Address */
@@ -126,10 +123,8 @@ static int print_serial_number(struct seq_file *seq, u8 * serialno, int max_len)
126 case I2O_SNFORMAT_LAN64_MAC: /* LAN-64 MAC Address */ 123 case I2O_SNFORMAT_LAN64_MAC: /* LAN-64 MAC Address */
127 /* FIXME: Figure out what a LAN-64 address really looks like?? */ 124 /* FIXME: Figure out what a LAN-64 address really looks like?? */
128 seq_printf(seq, 125 seq_printf(seq,
129 "LAN-64 MAC address @ [?:%02X:%02X:?] %02X:%02X:%02X:%02X:%02X:%02X", 126 "LAN-64 MAC address @ [?:%02X:%02X:?] %pM",
130 serialno[8], serialno[9], 127 serialno[8], serialno[9], &serialno[2]);
131 serialno[2], serialno[3],
132 serialno[4], serialno[5], serialno[6], serialno[7]);
133 break; 128 break;
134 129
135 case I2O_SNFORMAT_DDM: /* I2O DDM */ 130 case I2O_SNFORMAT_DDM: /* I2O DDM */
diff --git a/drivers/misc/iwmc3200top/fw-download.c b/drivers/misc/iwmc3200top/fw-download.c
index 50d431e469f..9dbaeb574e6 100644
--- a/drivers/misc/iwmc3200top/fw-download.c
+++ b/drivers/misc/iwmc3200top/fw-download.c
@@ -43,15 +43,14 @@ static int iwmct_fw_parser_init(struct iwmct_priv *priv, const u8 *file,
43 struct iwmct_parser *parser = &priv->parser; 43 struct iwmct_parser *parser = &priv->parser;
44 struct iwmct_fw_hdr *fw_hdr = &parser->versions; 44 struct iwmct_fw_hdr *fw_hdr = &parser->versions;
45 45
46 LOG_INFOEX(priv, INIT, "-->\n"); 46 LOG_TRACE(priv, FW_DOWNLOAD, "-->\n");
47 47
48 LOG_INFO(priv, FW_DOWNLOAD, "file_size=%zd\n", file_size); 48 LOG_INFO(priv, FW_DOWNLOAD, "file_size=%zd\n", file_size);
49 49
50 parser->file = file; 50 parser->file = file;
51 parser->file_size = file_size; 51 parser->file_size = file_size;
52 parser->cur_pos = 0; 52 parser->cur_pos = 0;
53 parser->buf = NULL; 53 parser->entry_point = 0;
54
55 parser->buf = kzalloc(block_size, GFP_KERNEL); 54 parser->buf = kzalloc(block_size, GFP_KERNEL);
56 if (!parser->buf) { 55 if (!parser->buf) {
57 LOG_ERROR(priv, FW_DOWNLOAD, "kzalloc error\n"); 56 LOG_ERROR(priv, FW_DOWNLOAD, "kzalloc error\n");
@@ -70,7 +69,7 @@ static int iwmct_fw_parser_init(struct iwmct_priv *priv, const u8 *file,
70 69
71 parser->cur_pos += sizeof(struct iwmct_fw_hdr); 70 parser->cur_pos += sizeof(struct iwmct_fw_hdr);
72 71
73 LOG_INFOEX(priv, INIT, "<--\n"); 72 LOG_TRACE(priv, FW_DOWNLOAD, "<--\n");
74 return 0; 73 return 0;
75} 74}
76 75
@@ -113,7 +112,7 @@ static int iwmct_parse_next_section(struct iwmct_priv *priv, const u8 **p_sec,
113 struct iwmct_dbg *dbg = &priv->dbg; 112 struct iwmct_dbg *dbg = &priv->dbg;
114 struct iwmct_fw_sec_hdr *sec_hdr; 113 struct iwmct_fw_sec_hdr *sec_hdr;
115 114
116 LOG_INFOEX(priv, INIT, "-->\n"); 115 LOG_TRACE(priv, FW_DOWNLOAD, "-->\n");
117 116
118 while (parser->cur_pos + sizeof(struct iwmct_fw_sec_hdr) 117 while (parser->cur_pos + sizeof(struct iwmct_fw_sec_hdr)
119 <= parser->file_size) { 118 <= parser->file_size) {
@@ -152,7 +151,7 @@ static int iwmct_parse_next_section(struct iwmct_priv *priv, const u8 **p_sec,
152 "finished with section cur_pos=%zd\n", parser->cur_pos); 151 "finished with section cur_pos=%zd\n", parser->cur_pos);
153 } 152 }
154 153
155 LOG_INFOEX(priv, INIT, "<--\n"); 154 LOG_TRACE(priv, INIT, "<--\n");
156 return 0; 155 return 0;
157} 156}
158 157
@@ -167,7 +166,7 @@ static int iwmct_download_section(struct iwmct_priv *priv, const u8 *p_sec,
167 int ret = 0; 166 int ret = 0;
168 u32 cmd = 0; 167 u32 cmd = 0;
169 168
170 LOG_INFOEX(priv, INIT, "-->\n"); 169 LOG_TRACE(priv, FW_DOWNLOAD, "-->\n");
171 LOG_INFO(priv, FW_DOWNLOAD, "Download address 0x%x size 0x%zx\n", 170 LOG_INFO(priv, FW_DOWNLOAD, "Download address 0x%x size 0x%zx\n",
172 addr, sec_size); 171 addr, sec_size);
173 172
@@ -229,7 +228,7 @@ static int iwmct_download_section(struct iwmct_priv *priv, const u8 *p_sec,
229 hdr->cmd = cpu_to_le32(cmd); 228 hdr->cmd = cpu_to_le32(cmd);
230 /* send it down */ 229 /* send it down */
231 /* TODO: add more proper sending and error checking */ 230 /* TODO: add more proper sending and error checking */
232 ret = iwmct_tx(priv, 0, parser->buf, trans_size); 231 ret = iwmct_tx(priv, parser->buf, trans_size);
233 if (ret != 0) { 232 if (ret != 0) {
234 LOG_INFO(priv, FW_DOWNLOAD, 233 LOG_INFO(priv, FW_DOWNLOAD,
235 "iwmct_tx returned %d\n", ret); 234 "iwmct_tx returned %d\n", ret);
@@ -251,7 +250,7 @@ static int iwmct_download_section(struct iwmct_priv *priv, const u8 *p_sec,
251 if (sent < sec_size) 250 if (sent < sec_size)
252 ret = -EINVAL; 251 ret = -EINVAL;
253exit: 252exit:
254 LOG_INFOEX(priv, INIT, "<--\n"); 253 LOG_TRACE(priv, FW_DOWNLOAD, "<--\n");
255 return ret; 254 return ret;
256} 255}
257 256
@@ -262,7 +261,7 @@ static int iwmct_kick_fw(struct iwmct_priv *priv, bool jump)
262 int ret; 261 int ret;
263 u32 cmd; 262 u32 cmd;
264 263
265 LOG_INFOEX(priv, INIT, "-->\n"); 264 LOG_TRACE(priv, FW_DOWNLOAD, "-->\n");
266 265
267 memset(parser->buf, 0, parser->buf_size); 266 memset(parser->buf, 0, parser->buf_size);
268 cmd = IWMC_CMD_SIGNATURE << CMD_HDR_SIGNATURE_POS; 267 cmd = IWMC_CMD_SIGNATURE << CMD_HDR_SIGNATURE_POS;
@@ -281,11 +280,11 @@ static int iwmct_kick_fw(struct iwmct_priv *priv, bool jump)
281 LOG_HEXDUMP(FW_DOWNLOAD, parser->buf, sizeof(*hdr)); 280 LOG_HEXDUMP(FW_DOWNLOAD, parser->buf, sizeof(*hdr));
282 /* send it down */ 281 /* send it down */
283 /* TODO: add more proper sending and error checking */ 282 /* TODO: add more proper sending and error checking */
284 ret = iwmct_tx(priv, 0, parser->buf, IWMC_SDIO_BLK_SIZE); 283 ret = iwmct_tx(priv, parser->buf, IWMC_SDIO_BLK_SIZE);
285 if (ret) 284 if (ret)
286 LOG_INFO(priv, FW_DOWNLOAD, "iwmct_tx returned %d", ret); 285 LOG_INFO(priv, FW_DOWNLOAD, "iwmct_tx returned %d", ret);
287 286
288 LOG_INFOEX(priv, INIT, "<--\n"); 287 LOG_TRACE(priv, FW_DOWNLOAD, "<--\n");
289 return 0; 288 return 0;
290} 289}
291 290
@@ -298,8 +297,16 @@ int iwmct_fw_load(struct iwmct_priv *priv)
298 __le32 addr; 297 __le32 addr;
299 int ret; 298 int ret;
300 299
301 /* clear parser struct */ 300
302 memset(&priv->parser, 0, sizeof(struct iwmct_parser)); 301 LOG_INFO(priv, FW_DOWNLOAD, "barker download request 0x%x is:\n",
302 priv->barker);
303 LOG_INFO(priv, FW_DOWNLOAD, "******* Top FW %s requested ********\n",
304 (priv->barker & BARKER_DNLOAD_TOP_MSK) ? "was" : "not");
305 LOG_INFO(priv, FW_DOWNLOAD, "******* GPS FW %s requested ********\n",
306 (priv->barker & BARKER_DNLOAD_GPS_MSK) ? "was" : "not");
307 LOG_INFO(priv, FW_DOWNLOAD, "******* BT FW %s requested ********\n",
308 (priv->barker & BARKER_DNLOAD_BT_MSK) ? "was" : "not");
309
303 310
304 /* get the firmware */ 311 /* get the firmware */
305 ret = request_firmware(&raw, fw_name, &priv->func->dev); 312 ret = request_firmware(&raw, fw_name, &priv->func->dev);
@@ -317,6 +324,7 @@ int iwmct_fw_load(struct iwmct_priv *priv)
317 324
318 LOG_INFO(priv, FW_DOWNLOAD, "Read firmware '%s'\n", fw_name); 325 LOG_INFO(priv, FW_DOWNLOAD, "Read firmware '%s'\n", fw_name);
319 326
327 /* clear parser struct */
320 ret = iwmct_fw_parser_init(priv, raw->data, raw->size, priv->trans_len); 328 ret = iwmct_fw_parser_init(priv, raw->data, raw->size, priv->trans_len);
321 if (ret < 0) { 329 if (ret < 0) {
322 LOG_ERROR(priv, FW_DOWNLOAD, 330 LOG_ERROR(priv, FW_DOWNLOAD,
@@ -324,7 +332,6 @@ int iwmct_fw_load(struct iwmct_priv *priv)
324 goto exit; 332 goto exit;
325 } 333 }
326 334
327 /* checksum */
328 if (!iwmct_checksum(priv)) { 335 if (!iwmct_checksum(priv)) {
329 LOG_ERROR(priv, FW_DOWNLOAD, "checksum error\n"); 336 LOG_ERROR(priv, FW_DOWNLOAD, "checksum error\n");
330 ret = -EINVAL; 337 ret = -EINVAL;
@@ -333,23 +340,18 @@ int iwmct_fw_load(struct iwmct_priv *priv)
333 340
334 /* download firmware to device */ 341 /* download firmware to device */
335 while (iwmct_parse_next_section(priv, &pdata, &len, &addr)) { 342 while (iwmct_parse_next_section(priv, &pdata, &len, &addr)) {
336 if (iwmct_download_section(priv, pdata, len, addr)) { 343 ret = iwmct_download_section(priv, pdata, len, addr);
344 if (ret) {
337 LOG_ERROR(priv, FW_DOWNLOAD, 345 LOG_ERROR(priv, FW_DOWNLOAD,
338 "%s download section failed\n", fw_name); 346 "%s download section failed\n", fw_name);
339 ret = -EIO;
340 goto exit; 347 goto exit;
341 } 348 }
342 } 349 }
343 350
344 iwmct_kick_fw(priv, !!(priv->barker & BARKER_DNLOAD_JUMP_MSK)); 351 ret = iwmct_kick_fw(priv, !!(priv->barker & BARKER_DNLOAD_JUMP_MSK));
345 352
346exit: 353exit:
347 kfree(priv->parser.buf); 354 kfree(priv->parser.buf);
348 355 release_firmware(raw);
349 if (raw)
350 release_firmware(raw);
351
352 raw = NULL;
353
354 return ret; 356 return ret;
355} 357}
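
The tail of iwmct_fw_load() above also drops the NULL test around release_firmware(): once request_firmware() has returned 0 the firmware pointer is valid and only needs to be released on the way out. A hedged sketch of that idiom, with illustrative names and the parsing step elided:

#include <linux/firmware.h>
#include <linux/device.h>

static int example_load_fw(struct device *dev, const char *name)
{
	const struct firmware *fw;
	int ret;

	ret = request_firmware(&fw, name, dev);
	if (ret) {
		dev_err(dev, "request_firmware(%s) failed: %d\n", name, ret);
		return ret;	/* fw was never obtained, nothing to release */
	}

	/* ... parse fw->data / fw->size and push it to the device ... */

	release_firmware(fw);	/* unconditional: fw is valid on this path */
	return 0;
}
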
diff --git a/drivers/misc/iwmc3200top/iwmc3200top.h b/drivers/misc/iwmc3200top/iwmc3200top.h
index 43bd510e187..740ff0738ea 100644
--- a/drivers/misc/iwmc3200top/iwmc3200top.h
+++ b/drivers/misc/iwmc3200top/iwmc3200top.h
@@ -196,9 +196,7 @@ struct iwmct_priv {
196 struct list_head read_req_list; 196 struct list_head read_req_list;
197}; 197};
198 198
199extern int iwmct_tx(struct iwmct_priv *priv, unsigned int addr, 199extern int iwmct_tx(struct iwmct_priv *priv, void *src, int count);
200 void *src, int count);
201
202extern int iwmct_fw_load(struct iwmct_priv *priv); 200extern int iwmct_fw_load(struct iwmct_priv *priv);
203 201
204extern void iwmct_dbg_init_params(struct iwmct_priv *drv); 202extern void iwmct_dbg_init_params(struct iwmct_priv *drv);
diff --git a/drivers/misc/iwmc3200top/log.h b/drivers/misc/iwmc3200top/log.h
index aba8121f978..4434bb16cea 100644
--- a/drivers/misc/iwmc3200top/log.h
+++ b/drivers/misc/iwmc3200top/log.h
@@ -37,13 +37,26 @@
37#define LOG_SEV_INFO 3 37#define LOG_SEV_INFO 3
38#define LOG_SEV_INFOEX 4 38#define LOG_SEV_INFOEX 4
39 39
40#define LOG_SEV_FILTER_ALL \ 40/* Log levels not defined for FW */
41 (BIT(LOG_SEV_CRITICAL) | \ 41#define LOG_SEV_TRACE 5
42 BIT(LOG_SEV_ERROR) | \ 42#define LOG_SEV_DUMP 6
43 BIT(LOG_SEV_WARNING) | \ 43
44 BIT(LOG_SEV_INFO) | \ 44#define LOG_SEV_FW_FILTER_ALL \
45 (BIT(LOG_SEV_CRITICAL) | \
46 BIT(LOG_SEV_ERROR) | \
47 BIT(LOG_SEV_WARNING) | \
48 BIT(LOG_SEV_INFO) | \
45 BIT(LOG_SEV_INFOEX)) 49 BIT(LOG_SEV_INFOEX))
46 50
51#define LOG_SEV_FILTER_ALL \
52 (BIT(LOG_SEV_CRITICAL) | \
53 BIT(LOG_SEV_ERROR) | \
54 BIT(LOG_SEV_WARNING) | \
55 BIT(LOG_SEV_INFO) | \
56 BIT(LOG_SEV_INFOEX) | \
57 BIT(LOG_SEV_TRACE) | \
58 BIT(LOG_SEV_DUMP))
59
47/* log source */ 60/* log source */
48#define LOG_SRC_INIT 0 61#define LOG_SRC_INIT 0
49#define LOG_SRC_DEBUGFS 1 62#define LOG_SRC_DEBUGFS 1
@@ -104,16 +117,16 @@ do { \
104 __func__, __LINE__, ##args); \ 117 __func__, __LINE__, ##args); \
105} while (0) 118} while (0)
106 119
107#define LOG_INFOEX(priv, src, fmt, args...) \ 120#define LOG_TRACE(priv, src, fmt, args...) \
108do { \ 121do { \
109 if (iwmct_logdefs[LOG_SRC_ ## src] & BIT(LOG_SEV_INFOEX)) \ 122 if (iwmct_logdefs[LOG_SRC_ ## src] & BIT(LOG_SEV_TRACE)) \
110 dev_dbg(priv2dev(priv), "%s %d: " fmt, \ 123 dev_dbg(priv2dev(priv), "%s %d: " fmt, \
111 __func__, __LINE__, ##args); \ 124 __func__, __LINE__, ##args); \
112} while (0) 125} while (0)
113 126
114#define LOG_HEXDUMP(src, ptr, len) \ 127#define LOG_HEXDUMP(src, ptr, len) \
115do { \ 128do { \
116 if (iwmct_logdefs[LOG_SRC_ ## src] & BIT(LOG_SEV_INFOEX)) \ 129 if (iwmct_logdefs[LOG_SRC_ ## src] & BIT(LOG_SEV_DUMP)) \
117 print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_NONE, \ 130 print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_NONE, \
118 16, 1, ptr, len, false); \ 131 16, 1, ptr, len, false); \
119} while (0) 132} while (0)
@@ -142,7 +155,7 @@ ssize_t store_iwmct_log_level_fw(struct device *d,
142#define LOG_ERROR(priv, src, fmt, args...) 155#define LOG_ERROR(priv, src, fmt, args...)
143#define LOG_WARNING(priv, src, fmt, args...) 156#define LOG_WARNING(priv, src, fmt, args...)
144#define LOG_INFO(priv, src, fmt, args...) 157#define LOG_INFO(priv, src, fmt, args...)
145#define LOG_INFOEX(priv, src, fmt, args...) 158#define LOG_TRACE(priv, src, fmt, args...)
146#define LOG_HEXDUMP(src, ptr, len) 159#define LOG_HEXDUMP(src, ptr, len)
147 160
148static inline void iwmct_log_top_message(struct iwmct_priv *priv, 161static inline void iwmct_log_top_message(struct iwmct_priv *priv,
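
TRACE and DUMP are host-only severities, so LOG_SEV_FW_FILTER_ALL keeps the subset that can be forwarded to the firmware while LOG_SEV_FILTER_ALL now also covers the two new bits, letting flow tracing (LOG_TRACE) and hexdumps (LOG_HEXDUMP) be enabled independently. A short illustrative sketch; it assumes the driver's own iwmc3200top.h and log.h are included, and the iwmct_log_set_filter() prototype is inferred from its use in the iwmct_init() hunk further down:

static void example_trace(struct iwmct_priv *priv)
{
	/* checked at runtime against iwmct_logdefs[LOG_SRC_IRQ] & BIT(LOG_SEV_TRACE) */
	LOG_TRACE(priv, IRQ, "-->\n");
}

static void example_enable_irq_tracing(void)
{
	/* runtime defaults plus TRACE (but not DUMP) for the IRQ source */
	iwmct_log_set_filter(LOG_SRC_IRQ,
			     LOG_SEV_FILTER_RUNTIME | BIT(LOG_SEV_TRACE));
}
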
diff --git a/drivers/misc/iwmc3200top/main.c b/drivers/misc/iwmc3200top/main.c
index fafcaa481d7..dd0a3913bf6 100644
--- a/drivers/misc/iwmc3200top/main.c
+++ b/drivers/misc/iwmc3200top/main.c
@@ -49,6 +49,20 @@ MODULE_LICENSE("GPL");
49MODULE_AUTHOR(DRIVER_COPYRIGHT); 49MODULE_AUTHOR(DRIVER_COPYRIGHT);
50MODULE_FIRMWARE(FW_NAME(FW_API_VER)); 50MODULE_FIRMWARE(FW_NAME(FW_API_VER));
51 51
52
53static inline int __iwmct_tx(struct iwmct_priv *priv, void *src, int count)
54{
55 return sdio_memcpy_toio(priv->func, IWMC_SDIO_DATA_ADDR, src, count);
56
57}
58int iwmct_tx(struct iwmct_priv *priv, void *src, int count)
59{
60 int ret;
61 sdio_claim_host(priv->func);
62 ret = __iwmct_tx(priv, src, count);
63 sdio_release_host(priv->func);
64 return ret;
65}
52/* 66/*
53 * This workers main task is to wait for OP_OPR_ALIVE 67 * This workers main task is to wait for OP_OPR_ALIVE
54 * from TOP FW until ALIVE_MSG_TIMOUT timeout is elapsed. 68 * from TOP FW until ALIVE_MSG_TIMOUT timeout is elapsed.
@@ -66,7 +80,7 @@ static void iwmct_rescan_worker(struct work_struct *ws)
66 80
67 ret = bus_rescan_devices(priv->func->dev.bus); 81 ret = bus_rescan_devices(priv->func->dev.bus);
68 if (ret < 0) 82 if (ret < 0)
69 LOG_INFO(priv, FW_DOWNLOAD, "bus_rescan_devices FAILED!!!\n"); 83 LOG_INFO(priv, INIT, "bus_rescan_devices FAILED!!!\n");
70} 84}
71 85
72static void op_top_message(struct iwmct_priv *priv, struct top_msg *msg) 86static void op_top_message(struct iwmct_priv *priv, struct top_msg *msg)
@@ -137,7 +151,7 @@ int iwmct_send_hcmd(struct iwmct_priv *priv, u8 *cmd, u16 len)
137 int ret; 151 int ret;
138 u8 *buf; 152 u8 *buf;
139 153
140 LOG_INFOEX(priv, FW_MSG, "Sending hcmd:\n"); 154 LOG_TRACE(priv, FW_MSG, "Sending hcmd:\n");
141 155
142 /* add padding to 256 for IWMC */ 156 /* add padding to 256 for IWMC */
143 ((struct top_msg *)cmd)->hdr.flags |= CMD_FLAG_PADDING_256; 157 ((struct top_msg *)cmd)->hdr.flags |= CMD_FLAG_PADDING_256;
@@ -158,27 +172,12 @@ int iwmct_send_hcmd(struct iwmct_priv *priv, u8 *cmd, u16 len)
158 } 172 }
159 173
160 memcpy(buf, cmd, len); 174 memcpy(buf, cmd, len);
161 175 ret = iwmct_tx(priv, buf, FW_HCMD_BLOCK_SIZE);
162 sdio_claim_host(priv->func);
163 ret = sdio_memcpy_toio(priv->func, IWMC_SDIO_DATA_ADDR, buf,
164 FW_HCMD_BLOCK_SIZE);
165 sdio_release_host(priv->func);
166 176
167 kfree(buf); 177 kfree(buf);
168 return ret; 178 return ret;
169} 179}
170 180
171int iwmct_tx(struct iwmct_priv *priv, unsigned int addr,
172 void *src, int count)
173{
174 int ret;
175
176 sdio_claim_host(priv->func);
177 ret = sdio_memcpy_toio(priv->func, addr, src, count);
178 sdio_release_host(priv->func);
179
180 return ret;
181}
182 181
183static void iwmct_irq_read_worker(struct work_struct *ws) 182static void iwmct_irq_read_worker(struct work_struct *ws)
184{ 183{
@@ -192,7 +191,7 @@ static void iwmct_irq_read_worker(struct work_struct *ws)
192 191
193 priv = container_of(ws, struct iwmct_priv, isr_worker); 192 priv = container_of(ws, struct iwmct_priv, isr_worker);
194 193
195 LOG_INFO(priv, IRQ, "enter iwmct_irq_read_worker %p\n", ws); 194 LOG_TRACE(priv, IRQ, "enter iwmct_irq_read_worker %p\n", ws);
196 195
197 /* --------------------- Handshake with device -------------------- */ 196 /* --------------------- Handshake with device -------------------- */
198 sdio_claim_host(priv->func); 197 sdio_claim_host(priv->func);
@@ -273,8 +272,7 @@ static void iwmct_irq_read_worker(struct work_struct *ws)
273 272
274 if (barker & BARKER_DNLOAD_SYNC_MSK) { 273 if (barker & BARKER_DNLOAD_SYNC_MSK) {
275 /* Send the same barker back */ 274 /* Send the same barker back */
276 ret = sdio_memcpy_toio(priv->func, IWMC_SDIO_DATA_ADDR, 275 ret = __iwmct_tx(priv, buf, iosize);
277 buf, iosize);
278 if (ret) { 276 if (ret) {
279 LOG_ERROR(priv, IRQ, 277 LOG_ERROR(priv, IRQ,
280 "error %d echoing barker\n", ret); 278 "error %d echoing barker\n", ret);
@@ -292,15 +290,6 @@ static void iwmct_irq_read_worker(struct work_struct *ws)
292 290
293 sdio_release_host(priv->func); 291 sdio_release_host(priv->func);
294 292
295
296 LOG_INFO(priv, IRQ, "barker download request 0x%x is:\n", priv->barker);
297 LOG_INFO(priv, IRQ, "******* Top FW %s requested ********\n",
298 (priv->barker & BARKER_DNLOAD_TOP_MSK) ? "was" : "not");
299 LOG_INFO(priv, IRQ, "******* GPS FW %s requested ********\n",
300 (priv->barker & BARKER_DNLOAD_GPS_MSK) ? "was" : "not");
301 LOG_INFO(priv, IRQ, "******* BT FW %s requested ********\n",
302 (priv->barker & BARKER_DNLOAD_BT_MSK) ? "was" : "not");
303
304 if (priv->dbg.fw_download) 293 if (priv->dbg.fw_download)
305 iwmct_fw_load(priv); 294 iwmct_fw_load(priv);
306 else 295 else
@@ -312,7 +301,7 @@ exit_release:
312 sdio_release_host(priv->func); 301 sdio_release_host(priv->func);
313exit: 302exit:
314 kfree(buf); 303 kfree(buf);
315 LOG_INFO(priv, IRQ, "exit iwmct_irq_read_worker\n"); 304 LOG_TRACE(priv, IRQ, "exit iwmct_irq_read_worker\n");
316} 305}
317 306
318static void iwmct_irq(struct sdio_func *func) 307static void iwmct_irq(struct sdio_func *func)
@@ -325,12 +314,12 @@ static void iwmct_irq(struct sdio_func *func)
325 314
326 priv = sdio_get_drvdata(func); 315 priv = sdio_get_drvdata(func);
327 316
328 LOG_INFO(priv, IRQ, "enter iwmct_irq\n"); 317 LOG_TRACE(priv, IRQ, "enter iwmct_irq\n");
329 318
330 /* read the function's status register */ 319 /* read the function's status register */
331 val = sdio_readb(func, IWMC_SDIO_INTR_STATUS_ADDR, &ret); 320 val = sdio_readb(func, IWMC_SDIO_INTR_STATUS_ADDR, &ret);
332 321
333 LOG_INFO(priv, IRQ, "iir value = %d, ret=%d\n", val, ret); 322 LOG_TRACE(priv, IRQ, "iir value = %d, ret=%d\n", val, ret);
334 323
335 if (!val) { 324 if (!val) {
336 LOG_ERROR(priv, IRQ, "iir = 0, exiting ISR\n"); 325 LOG_ERROR(priv, IRQ, "iir = 0, exiting ISR\n");
@@ -372,7 +361,7 @@ static void iwmct_irq(struct sdio_func *func)
372 361
373 queue_work(priv->wq, &priv->isr_worker); 362 queue_work(priv->wq, &priv->isr_worker);
374 363
375 LOG_INFO(priv, IRQ, "exit iwmct_irq\n"); 364 LOG_TRACE(priv, IRQ, "exit iwmct_irq\n");
376 365
377 return; 366 return;
378 367
@@ -660,7 +649,7 @@ static int __init iwmct_init(void)
660 649
661 /* Default log filter settings */ 650 /* Default log filter settings */
662 iwmct_log_set_filter(LOG_SRC_ALL, LOG_SEV_FILTER_RUNTIME); 651 iwmct_log_set_filter(LOG_SRC_ALL, LOG_SEV_FILTER_RUNTIME);
663 iwmct_log_set_filter(LOG_SRC_FW_MSG, LOG_SEV_FILTER_ALL); 652 iwmct_log_set_filter(LOG_SRC_FW_MSG, LOG_SEV_FW_FILTER_ALL);
664 iwmct_log_set_fw_filter(LOG_SRC_ALL, FW_LOG_SEV_FILTER_RUNTIME); 653 iwmct_log_set_fw_filter(LOG_SRC_ALL, FW_LOG_SEV_FILTER_RUNTIME);
665 654
666 rc = sdio_register_driver(&iwmct_driver); 655 rc = sdio_register_driver(&iwmct_driver);
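
The main.c hunk splits transmission into a pair: __iwmct_tx() assumes the caller already holds the SDIO host, as the IRQ read worker does inside its sdio_claim_host()/sdio_release_host() section, while iwmct_tx() claims and releases the host itself for standalone callers such as iwmct_send_hcmd() and the firmware download path (which also lets the fixed IWMC_SDIO_DATA_ADDR move into the helper). An illustrative sketch of the two call shapes, written as if inside main.c where the static __iwmct_tx() is visible:

#include <linux/mmc/sdio_func.h>

/* standalone caller: the wrapper claims and releases the host itself */
static int example_send_block(struct iwmct_priv *priv, void *buf, int len)
{
	return iwmct_tx(priv, buf, len);
}

/* caller already inside a claimed section: use __iwmct_tx() so the host
 * is not claimed a second time around the transfer */
static int example_reply_while_claimed(struct iwmct_priv *priv,
				       void *buf, int len)
{
	int ret;

	sdio_claim_host(priv->func);
	/* ... read the barker, build the reply in buf ... */
	ret = __iwmct_tx(priv, buf, len);
	sdio_release_host(priv->func);
	return ret;
}
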
diff --git a/drivers/net/3c59x.c b/drivers/net/3c59x.c
index 39db0e96815..5df46c230b0 100644
--- a/drivers/net/3c59x.c
+++ b/drivers/net/3c59x.c
@@ -375,7 +375,7 @@ static struct vortex_chip_info {
375}; 375};
376 376
377 377
378static struct pci_device_id vortex_pci_tbl[] = { 378static DEFINE_PCI_DEVICE_TABLE(vortex_pci_tbl) = {
379 { 0x10B7, 0x5900, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C590 }, 379 { 0x10B7, 0x5900, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C590 },
380 { 0x10B7, 0x5920, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C592 }, 380 { 0x10B7, 0x5920, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C592 },
381 { 0x10B7, 0x5970, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C597 }, 381 { 0x10B7, 0x5970, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C597 },
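
This table, like those in the drivers below, now uses DEFINE_PCI_DEVICE_TABLE(), which wraps the usual "const struct pci_device_id name[]" declaration and, at least in this kernel generation, adds the __devinitconst annotation so the IDs sit with other init-only device data. A hedged sketch with made-up vendor/device IDs:

#include <linux/module.h>
#include <linux/pci.h>

/* hypothetical IDs, for illustration only */
#define EXAMPLE_VENDOR_ID	0x1234
#define EXAMPLE_DEVICE_ID	0xabcd

static DEFINE_PCI_DEVICE_TABLE(example_pci_tbl) = {
	{ PCI_DEVICE(EXAMPLE_VENDOR_ID, EXAMPLE_DEVICE_ID) },
	{ }	/* required terminating entry */
};
MODULE_DEVICE_TABLE(pci, example_pci_tbl);
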
diff --git a/drivers/net/8139cp.c b/drivers/net/8139cp.c
index 3f452bcbfb9..9d59654748b 100644
--- a/drivers/net/8139cp.c
+++ b/drivers/net/8139cp.c
@@ -394,7 +394,7 @@ static int cp_get_eeprom(struct net_device *dev,
394static int cp_set_eeprom(struct net_device *dev, 394static int cp_set_eeprom(struct net_device *dev,
395 struct ethtool_eeprom *eeprom, u8 *data); 395 struct ethtool_eeprom *eeprom, u8 *data);
396 396
397static struct pci_device_id cp_pci_tbl[] = { 397static DEFINE_PCI_DEVICE_TABLE(cp_pci_tbl) = {
398 { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, PCI_DEVICE_ID_REALTEK_8139), }, 398 { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, PCI_DEVICE_ID_REALTEK_8139), },
399 { PCI_DEVICE(PCI_VENDOR_ID_TTTECH, PCI_DEVICE_ID_TTTECH_MC322), }, 399 { PCI_DEVICE(PCI_VENDOR_ID_TTTECH, PCI_DEVICE_ID_TTTECH_MC322), },
400 { }, 400 { },
diff --git a/drivers/net/8139too.c b/drivers/net/8139too.c
index 25f7339daab..321e73aabb2 100644
--- a/drivers/net/8139too.c
+++ b/drivers/net/8139too.c
@@ -231,7 +231,7 @@ static const struct {
231}; 231};
232 232
233 233
234static struct pci_device_id rtl8139_pci_tbl[] = { 234static DEFINE_PCI_DEVICE_TABLE(rtl8139_pci_tbl) = {
235 {0x10ec, 0x8139, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RTL8139 }, 235 {0x10ec, 0x8139, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RTL8139 },
236 {0x10ec, 0x8138, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RTL8139 }, 236 {0x10ec, 0x8138, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RTL8139 },
237 {0x1113, 0x1211, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RTL8139 }, 237 {0x1113, 0x1211, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RTL8139 },
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index dd9a09c72df..cb0e534418e 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -2618,6 +2618,28 @@ config IXGBE_DCB
2618 2618
2619 If unsure, say N. 2619 If unsure, say N.
2620 2620
2621config IXGBEVF
2622 tristate "Intel(R) 82599 Virtual Function Ethernet support"
2623 depends on PCI_MSI
2624 ---help---
2625 This driver supports Intel(R) 82599 virtual functions. For more
2626 information on how to identify your adapter, go to the Adapter &
2627 Driver ID Guide at:
2628
2629 <http://support.intel.com/support/network/sb/CS-008441.htm>
2630
2631 For general information and support, go to the Intel support
2632 website at:
2633
2634 <http://support.intel.com>
2635
2636 More specific information on configuring the driver is in
2637 <file:Documentation/networking/ixgbevf.txt>.
2638
2639 To compile this driver as a module, choose M here. The module
2640 will be called ixgbevf. MSI-X interrupt support is required
2641 for this driver to work correctly.
2642
2621config IXGB 2643config IXGB
2622 tristate "Intel(R) PRO/10GbE support" 2644 tristate "Intel(R) PRO/10GbE support"
2623 depends on PCI 2645 depends on PCI
@@ -2756,6 +2778,13 @@ config BNX2X
2756 To compile this driver as a module, choose M here: the module 2778 To compile this driver as a module, choose M here: the module
2757 will be called bnx2x. This is recommended. 2779 will be called bnx2x. This is recommended.
2758 2780
2781config QLCNIC
2782 tristate "QLOGIC QLCNIC 1/10Gb Converged Ethernet NIC Support"
2783 depends on PCI
2784 help
2785 This driver supports QLogic QLE8240 and QLE8242 Converged Ethernet
2786 devices.
2787
2759config QLGE 2788config QLGE
2760 tristate "QLogic QLGE 10Gb Ethernet Driver Support" 2789 tristate "QLogic QLGE 10Gb Ethernet Driver Support"
2761 depends on PCI 2790 depends on PCI
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index ad1346dd9da..0b763cbe9b1 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -14,6 +14,7 @@ obj-$(CONFIG_IBM_NEW_EMAC) += ibm_newemac/
14obj-$(CONFIG_IGB) += igb/ 14obj-$(CONFIG_IGB) += igb/
15obj-$(CONFIG_IGBVF) += igbvf/ 15obj-$(CONFIG_IGBVF) += igbvf/
16obj-$(CONFIG_IXGBE) += ixgbe/ 16obj-$(CONFIG_IXGBE) += ixgbe/
17obj-$(CONFIG_IXGBEVF) += ixgbevf/
17obj-$(CONFIG_IXGB) += ixgb/ 18obj-$(CONFIG_IXGB) += ixgb/
18obj-$(CONFIG_IP1000) += ipg.o 19obj-$(CONFIG_IP1000) += ipg.o
19obj-$(CONFIG_CHELSIO_T1) += chelsio/ 20obj-$(CONFIG_CHELSIO_T1) += chelsio/
@@ -148,6 +149,7 @@ ll_temac-objs := ll_temac_main.o ll_temac_mdio.o
148obj-$(CONFIG_XILINX_LL_TEMAC) += ll_temac.o 149obj-$(CONFIG_XILINX_LL_TEMAC) += ll_temac.o
149obj-$(CONFIG_XILINX_EMACLITE) += xilinx_emaclite.o 150obj-$(CONFIG_XILINX_EMACLITE) += xilinx_emaclite.o
150obj-$(CONFIG_QLA3XXX) += qla3xxx.o 151obj-$(CONFIG_QLA3XXX) += qla3xxx.o
152obj-$(CONFIG_QLCNIC) += qlcnic/
151obj-$(CONFIG_QLGE) += qlge/ 153obj-$(CONFIG_QLGE) += qlge/
152 154
153obj-$(CONFIG_PPP) += ppp_generic.o 155obj-$(CONFIG_PPP) += ppp_generic.o
diff --git a/drivers/net/acenic.c b/drivers/net/acenic.c
index d82a9a99475..ec624ab03e8 100644
--- a/drivers/net/acenic.c
+++ b/drivers/net/acenic.c
@@ -134,7 +134,7 @@
134#define PCI_DEVICE_ID_SGI_ACENIC 0x0009 134#define PCI_DEVICE_ID_SGI_ACENIC 0x0009
135#endif 135#endif
136 136
137static struct pci_device_id acenic_pci_tbl[] = { 137static DEFINE_PCI_DEVICE_TABLE(acenic_pci_tbl) = {
138 { PCI_VENDOR_ID_ALTEON, PCI_DEVICE_ID_ALTEON_ACENIC_FIBRE, 138 { PCI_VENDOR_ID_ALTEON, PCI_DEVICE_ID_ALTEON_ACENIC_FIBRE,
139 PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_NETWORK_ETHERNET << 8, 0xffff00, }, 139 PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_NETWORK_ETHERNET << 8, 0xffff00, },
140 { PCI_VENDOR_ID_ALTEON, PCI_DEVICE_ID_ALTEON_ACENIC_COPPER, 140 { PCI_VENDOR_ID_ALTEON, PCI_DEVICE_ID_ALTEON_ACENIC_COPPER,
diff --git a/drivers/net/amd8111e.c b/drivers/net/amd8111e.c
index 766aabfdfc7..bb27b27d967 100644
--- a/drivers/net/amd8111e.c
+++ b/drivers/net/amd8111e.c
@@ -113,7 +113,7 @@ MODULE_PARM_DESC(coalesce, "Enable or Disable interrupt coalescing, 1: Enable, 0
113module_param_array(dynamic_ipg, bool, NULL, 0); 113module_param_array(dynamic_ipg, bool, NULL, 0);
114MODULE_PARM_DESC(dynamic_ipg, "Enable or Disable dynamic IPG, 1: Enable, 0: Disable"); 114MODULE_PARM_DESC(dynamic_ipg, "Enable or Disable dynamic IPG, 1: Enable, 0: Disable");
115 115
116static struct pci_device_id amd8111e_pci_tbl[] = { 116static DEFINE_PCI_DEVICE_TABLE(amd8111e_pci_tbl) = {
117 117
118 { PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD8111E_7462, 118 { PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD8111E_7462,
119 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, 119 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
@@ -1176,8 +1176,7 @@ static irqreturn_t amd8111e_interrupt(int irq, void *dev_id)
1176 /* Schedule a polling routine */ 1176 /* Schedule a polling routine */
1177 __napi_schedule(&lp->napi); 1177 __napi_schedule(&lp->napi);
1178 } else if (intren0 & RINTEN0) { 1178 } else if (intren0 & RINTEN0) {
1179 printk("************Driver bug! \ 1179 printk("************Driver bug! interrupt while in poll\n");
1180 interrupt while in poll\n");
1181 /* Fix by disable receive interrupts */ 1180 /* Fix by disable receive interrupts */
1182 writel(RINTEN0, mmio + INTEN0); 1181 writel(RINTEN0, mmio + INTEN0);
1183 } 1182 }
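
The second amd8111e hunk is more than cosmetic: a string literal continued with a backslash keeps the next source line's indentation inside the string, so the old message printed a run of leading whitespace between "Driver bug!" and "interrupt while in poll". Joining the text into one literal, or concatenating adjacent literals, avoids that; a small illustration:

#include <linux/kernel.h>

static void example_literals(void)
{
	/* backslash continuation: the two tabs before "message" become
	 * part of the string, printing "broken \t\tmessage" */
	pr_info("broken \
		message\n");

	/* adjacent literals concatenate with no extra characters,
	 * printing "clean message" */
	pr_info("clean "
		"message\n");
}
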
diff --git a/drivers/net/arcnet/com20020-pci.c b/drivers/net/arcnet/com20020-pci.c
index dbf4de39754..b68e1eb405f 100644
--- a/drivers/net/arcnet/com20020-pci.c
+++ b/drivers/net/arcnet/com20020-pci.c
@@ -144,7 +144,7 @@ static void __devexit com20020pci_remove(struct pci_dev *pdev)
144 free_netdev(dev); 144 free_netdev(dev);
145} 145}
146 146
147static struct pci_device_id com20020pci_id_table[] = { 147static DEFINE_PCI_DEVICE_TABLE(com20020pci_id_table) = {
148 { 0x1571, 0xa001, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, 148 { 0x1571, 0xa001, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
149 { 0x1571, 0xa002, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, 149 { 0x1571, 0xa002, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
150 { 0x1571, 0xa003, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, 150 { 0x1571, 0xa003, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
diff --git a/drivers/net/ariadne.c b/drivers/net/ariadne.c
index c35af3e106b..e2c202493fa 100644
--- a/drivers/net/ariadne.c
+++ b/drivers/net/ariadne.c
@@ -123,9 +123,7 @@ static void ariadne_reset(struct net_device *dev);
123static irqreturn_t ariadne_interrupt(int irq, void *data); 123static irqreturn_t ariadne_interrupt(int irq, void *data);
124static int ariadne_close(struct net_device *dev); 124static int ariadne_close(struct net_device *dev);
125static struct net_device_stats *ariadne_get_stats(struct net_device *dev); 125static struct net_device_stats *ariadne_get_stats(struct net_device *dev);
126#ifdef HAVE_MULTICAST
127static void set_multicast_list(struct net_device *dev); 126static void set_multicast_list(struct net_device *dev);
128#endif
129 127
130 128
131static void memcpyw(volatile u_short *dest, u_short *src, int len) 129static void memcpyw(volatile u_short *dest, u_short *src, int len)
diff --git a/drivers/net/arm/ep93xx_eth.c b/drivers/net/arm/ep93xx_eth.c
index b25467ac895..bf72d57a0af 100644
--- a/drivers/net/arm/ep93xx_eth.c
+++ b/drivers/net/arm/ep93xx_eth.c
@@ -9,6 +9,8 @@
9 * (at your option) any later version. 9 * (at your option) any later version.
10 */ 10 */
11 11
12#define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__
13
12#include <linux/dma-mapping.h> 14#include <linux/dma-mapping.h>
13#include <linux/module.h> 15#include <linux/module.h>
14#include <linux/kernel.h> 16#include <linux/kernel.h>
@@ -20,9 +22,9 @@
20#include <linux/moduleparam.h> 22#include <linux/moduleparam.h>
21#include <linux/platform_device.h> 23#include <linux/platform_device.h>
22#include <linux/delay.h> 24#include <linux/delay.h>
23#include <mach/ep93xx-regs.h> 25#include <linux/io.h>
24#include <mach/platform.h> 26
25#include <asm/io.h> 27#include <mach/hardware.h>
26 28
27#define DRV_MODULE_NAME "ep93xx-eth" 29#define DRV_MODULE_NAME "ep93xx-eth"
28#define DRV_MODULE_VERSION "0.1" 30#define DRV_MODULE_VERSION "0.1"
@@ -185,7 +187,47 @@ struct ep93xx_priv
185#define wrw(ep, off, val) __raw_writew((val), (ep)->base_addr + (off)) 187#define wrw(ep, off, val) __raw_writew((val), (ep)->base_addr + (off))
186#define wrl(ep, off, val) __raw_writel((val), (ep)->base_addr + (off)) 188#define wrl(ep, off, val) __raw_writel((val), (ep)->base_addr + (off))
187 189
188static int ep93xx_mdio_read(struct net_device *dev, int phy_id, int reg); 190static int ep93xx_mdio_read(struct net_device *dev, int phy_id, int reg)
191{
192 struct ep93xx_priv *ep = netdev_priv(dev);
193 int data;
194 int i;
195
196 wrl(ep, REG_MIICMD, REG_MIICMD_READ | (phy_id << 5) | reg);
197
198 for (i = 0; i < 10; i++) {
199 if ((rdl(ep, REG_MIISTS) & REG_MIISTS_BUSY) == 0)
200 break;
201 msleep(1);
202 }
203
204 if (i == 10) {
205 pr_info("mdio read timed out\n");
206 data = 0xffff;
207 } else {
208 data = rdl(ep, REG_MIIDATA);
209 }
210
211 return data;
212}
213
214static void ep93xx_mdio_write(struct net_device *dev, int phy_id, int reg, int data)
215{
216 struct ep93xx_priv *ep = netdev_priv(dev);
217 int i;
218
219 wrl(ep, REG_MIIDATA, data);
220 wrl(ep, REG_MIICMD, REG_MIICMD_WRITE | (phy_id << 5) | reg);
221
222 for (i = 0; i < 10; i++) {
223 if ((rdl(ep, REG_MIISTS) & REG_MIISTS_BUSY) == 0)
224 break;
225 msleep(1);
226 }
227
228 if (i == 10)
229 pr_info("mdio write timed out\n");
230}
189 231
190static struct net_device_stats *ep93xx_get_stats(struct net_device *dev) 232static struct net_device_stats *ep93xx_get_stats(struct net_device *dev)
191{ 233{
@@ -217,14 +259,11 @@ static int ep93xx_rx(struct net_device *dev, int processed, int budget)
217 rstat->rstat1 = 0; 259 rstat->rstat1 = 0;
218 260
219 if (!(rstat0 & RSTAT0_EOF)) 261 if (!(rstat0 & RSTAT0_EOF))
220 printk(KERN_CRIT "ep93xx_rx: not end-of-frame " 262 pr_crit("not end-of-frame %.8x %.8x\n", rstat0, rstat1);
221 " %.8x %.8x\n", rstat0, rstat1);
222 if (!(rstat0 & RSTAT0_EOB)) 263 if (!(rstat0 & RSTAT0_EOB))
223 printk(KERN_CRIT "ep93xx_rx: not end-of-buffer " 264 pr_crit("not end-of-buffer %.8x %.8x\n", rstat0, rstat1);
224 " %.8x %.8x\n", rstat0, rstat1);
225 if ((rstat1 & RSTAT1_BUFFER_INDEX) >> 16 != entry) 265 if ((rstat1 & RSTAT1_BUFFER_INDEX) >> 16 != entry)
226 printk(KERN_CRIT "ep93xx_rx: entry mismatch " 266 pr_crit("entry mismatch %.8x %.8x\n", rstat0, rstat1);
227 " %.8x %.8x\n", rstat0, rstat1);
228 267
229 if (!(rstat0 & RSTAT0_RWE)) { 268 if (!(rstat0 & RSTAT0_RWE)) {
230 ep->stats.rx_errors++; 269 ep->stats.rx_errors++;
@@ -241,8 +280,7 @@ static int ep93xx_rx(struct net_device *dev, int processed, int budget)
241 280
242 length = rstat1 & RSTAT1_FRAME_LENGTH; 281 length = rstat1 & RSTAT1_FRAME_LENGTH;
243 if (length > MAX_PKT_SIZE) { 282 if (length > MAX_PKT_SIZE) {
244 printk(KERN_NOTICE "ep93xx_rx: invalid length " 283 pr_notice("invalid length %.8x %.8x\n", rstat0, rstat1);
245 " %.8x %.8x\n", rstat0, rstat1);
246 goto err; 284 goto err;
247 } 285 }
248 286
@@ -371,11 +409,9 @@ static void ep93xx_tx_complete(struct net_device *dev)
371 tstat->tstat0 = 0; 409 tstat->tstat0 = 0;
372 410
373 if (tstat0 & TSTAT0_FA) 411 if (tstat0 & TSTAT0_FA)
374 printk(KERN_CRIT "ep93xx_tx_complete: frame aborted " 412 pr_crit("frame aborted %.8x\n", tstat0);
375 " %.8x\n", tstat0);
376 if ((tstat0 & TSTAT0_BUFFER_INDEX) != entry) 413 if ((tstat0 & TSTAT0_BUFFER_INDEX) != entry)
377 printk(KERN_CRIT "ep93xx_tx_complete: entry mismatch " 414 pr_crit("entry mismatch %.8x\n", tstat0);
378 " %.8x\n", tstat0);
379 415
380 if (tstat0 & TSTAT0_TXWE) { 416 if (tstat0 & TSTAT0_TXWE) {
381 int length = ep->descs->tdesc[entry].tdesc1 & 0xfff; 417 int length = ep->descs->tdesc[entry].tdesc1 & 0xfff;
@@ -536,7 +572,7 @@ static int ep93xx_start_hw(struct net_device *dev)
536 } 572 }
537 573
538 if (i == 10) { 574 if (i == 10) {
539 printk(KERN_CRIT DRV_MODULE_NAME ": hw failed to reset\n"); 575 pr_crit("hw failed to reset\n");
540 return 1; 576 return 1;
541 } 577 }
542 578
@@ -581,7 +617,7 @@ static int ep93xx_start_hw(struct net_device *dev)
581 } 617 }
582 618
583 if (i == 10) { 619 if (i == 10) {
584 printk(KERN_CRIT DRV_MODULE_NAME ": hw failed to start\n"); 620 pr_crit("hw failed to start\n");
585 return 1; 621 return 1;
586 } 622 }
587 623
@@ -617,7 +653,7 @@ static void ep93xx_stop_hw(struct net_device *dev)
617 } 653 }
618 654
619 if (i == 10) 655 if (i == 10)
620 printk(KERN_CRIT DRV_MODULE_NAME ": hw failed to reset\n"); 656 pr_crit("hw failed to reset\n");
621} 657}
622 658
623static int ep93xx_open(struct net_device *dev) 659static int ep93xx_open(struct net_device *dev)
@@ -681,48 +717,6 @@ static int ep93xx_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
681 return generic_mii_ioctl(&ep->mii, data, cmd, NULL); 717 return generic_mii_ioctl(&ep->mii, data, cmd, NULL);
682} 718}
683 719
684static int ep93xx_mdio_read(struct net_device *dev, int phy_id, int reg)
685{
686 struct ep93xx_priv *ep = netdev_priv(dev);
687 int data;
688 int i;
689
690 wrl(ep, REG_MIICMD, REG_MIICMD_READ | (phy_id << 5) | reg);
691
692 for (i = 0; i < 10; i++) {
693 if ((rdl(ep, REG_MIISTS) & REG_MIISTS_BUSY) == 0)
694 break;
695 msleep(1);
696 }
697
698 if (i == 10) {
699 printk(KERN_INFO DRV_MODULE_NAME ": mdio read timed out\n");
700 data = 0xffff;
701 } else {
702 data = rdl(ep, REG_MIIDATA);
703 }
704
705 return data;
706}
707
708static void ep93xx_mdio_write(struct net_device *dev, int phy_id, int reg, int data)
709{
710 struct ep93xx_priv *ep = netdev_priv(dev);
711 int i;
712
713 wrl(ep, REG_MIIDATA, data);
714 wrl(ep, REG_MIICMD, REG_MIICMD_WRITE | (phy_id << 5) | reg);
715
716 for (i = 0; i < 10; i++) {
717 if ((rdl(ep, REG_MIISTS) & REG_MIISTS_BUSY) == 0)
718 break;
719 msleep(1);
720 }
721
722 if (i == 10)
723 printk(KERN_INFO DRV_MODULE_NAME ": mdio write timed out\n");
724}
725
726static void ep93xx_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) 720static void ep93xx_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
727{ 721{
728 strcpy(info->driver, DRV_MODULE_NAME); 722 strcpy(info->driver, DRV_MODULE_NAME);
@@ -825,12 +819,19 @@ static int ep93xx_eth_probe(struct platform_device *pdev)
825 struct ep93xx_eth_data *data; 819 struct ep93xx_eth_data *data;
826 struct net_device *dev; 820 struct net_device *dev;
827 struct ep93xx_priv *ep; 821 struct ep93xx_priv *ep;
822 struct resource *mem;
823 int irq;
828 int err; 824 int err;
829 825
830 if (pdev == NULL) 826 if (pdev == NULL)
831 return -ENODEV; 827 return -ENODEV;
832 data = pdev->dev.platform_data; 828 data = pdev->dev.platform_data;
833 829
830 mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
831 irq = platform_get_irq(pdev, 0);
832 if (!mem || irq < 0)
833 return -ENXIO;
834
834 dev = ep93xx_dev_alloc(data); 835 dev = ep93xx_dev_alloc(data);
835 if (dev == NULL) { 836 if (dev == NULL) {
836 err = -ENOMEM; 837 err = -ENOMEM;
@@ -842,23 +843,21 @@ static int ep93xx_eth_probe(struct platform_device *pdev)
842 843
843 platform_set_drvdata(pdev, dev); 844 platform_set_drvdata(pdev, dev);
844 845
845 ep->res = request_mem_region(pdev->resource[0].start, 846 ep->res = request_mem_region(mem->start, resource_size(mem),
846 pdev->resource[0].end - pdev->resource[0].start + 1, 847 dev_name(&pdev->dev));
847 dev_name(&pdev->dev));
848 if (ep->res == NULL) { 848 if (ep->res == NULL) {
849 dev_err(&pdev->dev, "Could not reserve memory region\n"); 849 dev_err(&pdev->dev, "Could not reserve memory region\n");
850 err = -ENOMEM; 850 err = -ENOMEM;
851 goto err_out; 851 goto err_out;
852 } 852 }
853 853
854 ep->base_addr = ioremap(pdev->resource[0].start, 854 ep->base_addr = ioremap(mem->start, resource_size(mem));
855 pdev->resource[0].end - pdev->resource[0].start);
856 if (ep->base_addr == NULL) { 855 if (ep->base_addr == NULL) {
857 dev_err(&pdev->dev, "Failed to ioremap ethernet registers\n"); 856 dev_err(&pdev->dev, "Failed to ioremap ethernet registers\n");
858 err = -EIO; 857 err = -EIO;
859 goto err_out; 858 goto err_out;
860 } 859 }
861 ep->irq = pdev->resource[1].start; 860 ep->irq = irq;
862 861
863 ep->mii.phy_id = data->phy_id; 862 ep->mii.phy_id = data->phy_id;
864 ep->mii.phy_id_mask = 0x1f; 863 ep->mii.phy_id_mask = 0x1f;
@@ -877,11 +876,8 @@ static int ep93xx_eth_probe(struct platform_device *pdev)
877 goto err_out; 876 goto err_out;
878 } 877 }
879 878
880 printk(KERN_INFO "%s: ep93xx on-chip ethernet, IRQ %d, " 879 printk(KERN_INFO "%s: ep93xx on-chip ethernet, IRQ %d, %pM\n",
881 "%.2x:%.2x:%.2x:%.2x:%.2x:%.2x.\n", dev->name, 880 dev->name, ep->irq, dev->dev_addr);
882 ep->irq, data->dev_addr[0], data->dev_addr[1],
883 data->dev_addr[2], data->dev_addr[3],
884 data->dev_addr[4], data->dev_addr[5]);
885 881
886 return 0; 882 return 0;
887 883
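
Much of the ep93xx_eth message cleanup rests on the pr_fmt() hook defined at the top of the file: every pr_crit()/pr_notice()/pr_info() then expands with a "modname:function: " prefix, which is why the hand-written "ep93xx_rx: ..." style prefixes could be dropped from the format strings. A small sketch of the same arrangement in a hypothetical file:

/* must appear before the includes that bring in the pr_*() helpers */
#define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__

#include <linux/kernel.h>
#include <linux/module.h>

static void example_report(int status)
{
	/* with the pr_fmt() above this logs roughly
	 * "example:example_report: bad status 0x12" at KERN_CRIT */
	pr_crit("bad status 0x%x\n", status);
}
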
diff --git a/drivers/net/atl1c/atl1c_main.c b/drivers/net/atl1c/atl1c_main.c
index 2f4be59b9c0..d98095df05b 100644
--- a/drivers/net/atl1c/atl1c_main.c
+++ b/drivers/net/atl1c/atl1c_main.c
@@ -35,7 +35,7 @@ char atl1c_driver_version[] = ATL1C_DRV_VERSION;
35 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, 35 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
36 * Class, Class Mask, private data (not used) } 36 * Class, Class Mask, private data (not used) }
37 */ 37 */
38static struct pci_device_id atl1c_pci_tbl[] = { 38static DEFINE_PCI_DEVICE_TABLE(atl1c_pci_tbl) = {
39 {PCI_DEVICE(PCI_VENDOR_ID_ATTANSIC, PCI_DEVICE_ID_ATTANSIC_L1C)}, 39 {PCI_DEVICE(PCI_VENDOR_ID_ATTANSIC, PCI_DEVICE_ID_ATTANSIC_L1C)},
40 {PCI_DEVICE(PCI_VENDOR_ID_ATTANSIC, PCI_DEVICE_ID_ATTANSIC_L2C)}, 40 {PCI_DEVICE(PCI_VENDOR_ID_ATTANSIC, PCI_DEVICE_ID_ATTANSIC_L2C)},
41 /* required last entry */ 41 /* required last entry */
@@ -2596,11 +2596,8 @@ static int __devinit atl1c_probe(struct pci_dev *pdev,
2596 memcpy(netdev->dev_addr, adapter->hw.mac_addr, netdev->addr_len); 2596 memcpy(netdev->dev_addr, adapter->hw.mac_addr, netdev->addr_len);
2597 memcpy(netdev->perm_addr, adapter->hw.mac_addr, netdev->addr_len); 2597 memcpy(netdev->perm_addr, adapter->hw.mac_addr, netdev->addr_len);
2598 if (netif_msg_probe(adapter)) 2598 if (netif_msg_probe(adapter))
2599 dev_dbg(&pdev->dev, 2599 dev_dbg(&pdev->dev, "mac address : %pM\n",
2600 "mac address : %02x-%02x-%02x-%02x-%02x-%02x\n", 2600 adapter->hw.mac_addr);
2601 adapter->hw.mac_addr[0], adapter->hw.mac_addr[1],
2602 adapter->hw.mac_addr[2], adapter->hw.mac_addr[3],
2603 adapter->hw.mac_addr[4], adapter->hw.mac_addr[5]);
2604 2601
2605 atl1c_hw_set_mac_addr(&adapter->hw); 2602 atl1c_hw_set_mac_addr(&adapter->hw);
2606 INIT_WORK(&adapter->common_task, atl1c_common_task); 2603 INIT_WORK(&adapter->common_task, atl1c_common_task);
diff --git a/drivers/net/atl1e/atl1e_main.c b/drivers/net/atl1e/atl1e_main.c
index 08f8c0969e9..d59f8e89c65 100644
--- a/drivers/net/atl1e/atl1e_main.c
+++ b/drivers/net/atl1e/atl1e_main.c
@@ -35,7 +35,7 @@ char atl1e_driver_version[] = DRV_VERSION;
35 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, 35 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
36 * Class, Class Mask, private data (not used) } 36 * Class, Class Mask, private data (not used) }
37 */ 37 */
38static struct pci_device_id atl1e_pci_tbl[] = { 38static DEFINE_PCI_DEVICE_TABLE(atl1e_pci_tbl) = {
39 {PCI_DEVICE(PCI_VENDOR_ID_ATTANSIC, PCI_DEVICE_ID_ATTANSIC_L1E)}, 39 {PCI_DEVICE(PCI_VENDOR_ID_ATTANSIC, PCI_DEVICE_ID_ATTANSIC_L1E)},
40 {PCI_DEVICE(PCI_VENDOR_ID_ATTANSIC, 0x1066)}, 40 {PCI_DEVICE(PCI_VENDOR_ID_ATTANSIC, 0x1066)},
41 /* required last entry */ 41 /* required last entry */
@@ -2378,10 +2378,7 @@ static int __devinit atl1e_probe(struct pci_dev *pdev,
2378 2378
2379 memcpy(netdev->dev_addr, adapter->hw.mac_addr, netdev->addr_len); 2379 memcpy(netdev->dev_addr, adapter->hw.mac_addr, netdev->addr_len);
2380 memcpy(netdev->perm_addr, adapter->hw.mac_addr, netdev->addr_len); 2380 memcpy(netdev->perm_addr, adapter->hw.mac_addr, netdev->addr_len);
2381 dev_dbg(&pdev->dev, "mac address : %02x-%02x-%02x-%02x-%02x-%02x\n", 2381 dev_dbg(&pdev->dev, "mac address : %pM\n", adapter->hw.mac_addr);
2382 adapter->hw.mac_addr[0], adapter->hw.mac_addr[1],
2383 adapter->hw.mac_addr[2], adapter->hw.mac_addr[3],
2384 adapter->hw.mac_addr[4], adapter->hw.mac_addr[5]);
2385 2382
2386 INIT_WORK(&adapter->reset_task, atl1e_reset_task); 2383 INIT_WORK(&adapter->reset_task, atl1e_reset_task);
2387 INIT_WORK(&adapter->link_chg_task, atl1e_link_chg_task); 2384 INIT_WORK(&adapter->link_chg_task, atl1e_link_chg_task);
diff --git a/drivers/net/atlx/atl1.c b/drivers/net/atlx/atl1.c
index b6cf3263127..9ba547069db 100644
--- a/drivers/net/atlx/atl1.c
+++ b/drivers/net/atlx/atl1.c
@@ -232,7 +232,7 @@ static void __devinit atl1_check_options(struct atl1_adapter *adapter)
232/* 232/*
233 * atl1_pci_tbl - PCI Device ID Table 233 * atl1_pci_tbl - PCI Device ID Table
234 */ 234 */
235static const struct pci_device_id atl1_pci_tbl[] = { 235static DEFINE_PCI_DEVICE_TABLE(atl1_pci_tbl) = {
236 {PCI_DEVICE(PCI_VENDOR_ID_ATTANSIC, PCI_DEVICE_ID_ATTANSIC_L1)}, 236 {PCI_DEVICE(PCI_VENDOR_ID_ATTANSIC, PCI_DEVICE_ID_ATTANSIC_L1)},
237 /* required last entry */ 237 /* required last entry */
238 {0,} 238 {0,}
diff --git a/drivers/net/atlx/atl2.c b/drivers/net/atlx/atl2.c
index ec52529394a..40cf9e5cb9e 100644
--- a/drivers/net/atlx/atl2.c
+++ b/drivers/net/atlx/atl2.c
@@ -63,7 +63,7 @@ MODULE_VERSION(ATL2_DRV_VERSION);
63/* 63/*
64 * atl2_pci_tbl - PCI Device ID Table 64 * atl2_pci_tbl - PCI Device ID Table
65 */ 65 */
66static struct pci_device_id atl2_pci_tbl[] = { 66static DEFINE_PCI_DEVICE_TABLE(atl2_pci_tbl) = {
67 {PCI_DEVICE(PCI_VENDOR_ID_ATTANSIC, PCI_DEVICE_ID_ATTANSIC_L2)}, 67 {PCI_DEVICE(PCI_VENDOR_ID_ATTANSIC, PCI_DEVICE_ID_ATTANSIC_L2)},
68 /* required last entry */ 68 /* required last entry */
69 {0,} 69 {0,}
diff --git a/drivers/net/b44.c b/drivers/net/b44.c
index 4869adb6958..44b66be3813 100644
--- a/drivers/net/b44.c
+++ b/drivers/net/b44.c
@@ -102,7 +102,7 @@ MODULE_PARM_DESC(b44_debug, "B44 bitmapped debugging message enable value");
102 102
103 103
104#ifdef CONFIG_B44_PCI 104#ifdef CONFIG_B44_PCI
105static const struct pci_device_id b44_pci_tbl[] = { 105static DEFINE_PCI_DEVICE_TABLE(b44_pci_tbl) = {
106 { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_BCM4401) }, 106 { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_BCM4401) },
107 { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_BCM4401B0) }, 107 { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_BCM4401B0) },
108 { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_BCM4401B1) }, 108 { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_BCM4401B1) },
diff --git a/drivers/net/benet/be_cmds.c b/drivers/net/benet/be_cmds.c
index fee6eee7ae5..3227b11131c 100644
--- a/drivers/net/benet/be_cmds.c
+++ b/drivers/net/benet/be_cmds.c
@@ -1607,3 +1607,33 @@ err:
1607 spin_unlock_bh(&adapter->mcc_lock); 1607 spin_unlock_bh(&adapter->mcc_lock);
1608 return status; 1608 return status;
1609} 1609}
1610
1611extern int be_cmd_get_seeprom_data(struct be_adapter *adapter,
1612 struct be_dma_mem *nonemb_cmd)
1613{
1614 struct be_mcc_wrb *wrb;
1615 struct be_cmd_req_seeprom_read *req;
1616 struct be_sge *sge;
1617 int status;
1618
1619 spin_lock_bh(&adapter->mcc_lock);
1620
1621 wrb = wrb_from_mccq(adapter);
1622 req = nonemb_cmd->va;
1623 sge = nonembedded_sgl(wrb);
1624
1625 be_wrb_hdr_prepare(wrb, sizeof(*req), false, 1,
1626 OPCODE_COMMON_SEEPROM_READ);
1627
1628 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1629 OPCODE_COMMON_SEEPROM_READ, sizeof(*req));
1630
1631 sge->pa_hi = cpu_to_le32(upper_32_bits(nonemb_cmd->dma));
1632 sge->pa_lo = cpu_to_le32(nonemb_cmd->dma & 0xFFFFFFFF);
1633 sge->len = cpu_to_le32(nonemb_cmd->size);
1634
1635 status = be_mcc_notify_wait(adapter);
1636
1637 spin_unlock_bh(&adapter->mcc_lock);
1638 return status;
1639}
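
In be_cmd_get_seeprom_data() the 64-bit DMA address of the non-embedded buffer is split across the scatter/gather element, pa_hi taking upper_32_bits() and pa_lo the masked low word, with the length beside them. The same split can be written with the lower_32_bits() helper; a minimal sketch, assuming the driver's be_cmds.h (for struct be_sge) is in scope:

#include <linux/kernel.h>

static void example_fill_sge(struct be_sge *sge, dma_addr_t dma, u32 len)
{
	sge->pa_hi = cpu_to_le32(upper_32_bits(dma));
	sge->pa_lo = cpu_to_le32(lower_32_bits(dma));	/* same as dma & 0xFFFFFFFF */
	sge->len   = cpu_to_le32(len);
}
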
diff --git a/drivers/net/benet/be_cmds.h b/drivers/net/benet/be_cmds.h
index 13b33c84108..c622a968c37 100644
--- a/drivers/net/benet/be_cmds.h
+++ b/drivers/net/benet/be_cmds.h
@@ -124,6 +124,7 @@ struct be_mcc_mailbox {
124#define OPCODE_COMMON_CQ_CREATE 12 124#define OPCODE_COMMON_CQ_CREATE 12
125#define OPCODE_COMMON_EQ_CREATE 13 125#define OPCODE_COMMON_EQ_CREATE 13
126#define OPCODE_COMMON_MCC_CREATE 21 126#define OPCODE_COMMON_MCC_CREATE 21
127#define OPCODE_COMMON_SEEPROM_READ 30
127#define OPCODE_COMMON_NTWK_RX_FILTER 34 128#define OPCODE_COMMON_NTWK_RX_FILTER 34
128#define OPCODE_COMMON_GET_FW_VERSION 35 129#define OPCODE_COMMON_GET_FW_VERSION 35
129#define OPCODE_COMMON_SET_FLOW_CONTROL 36 130#define OPCODE_COMMON_SET_FLOW_CONTROL 36
@@ -855,6 +856,19 @@ struct be_cmd_resp_ddrdma_test {
855 u8 rcv_buff[4096]; 856 u8 rcv_buff[4096];
856}; 857};
857 858
859/*********************** SEEPROM Read ***********************/
860
861#define BE_READ_SEEPROM_LEN 1024
862struct be_cmd_req_seeprom_read {
863 struct be_cmd_req_hdr hdr;
864 u8 rsvd0[BE_READ_SEEPROM_LEN];
865};
866
867struct be_cmd_resp_seeprom_read {
868 struct be_cmd_req_hdr hdr;
869 u8 seeprom_data[BE_READ_SEEPROM_LEN];
870};
871
858extern int be_pci_fnum_get(struct be_adapter *adapter); 872extern int be_pci_fnum_get(struct be_adapter *adapter);
859extern int be_cmd_POST(struct be_adapter *adapter); 873extern int be_cmd_POST(struct be_adapter *adapter);
860extern int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr, 874extern int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr,
@@ -927,5 +941,8 @@ extern int be_cmd_loopback_test(struct be_adapter *adapter, u32 port_num,
927 u32 num_pkts, u64 pattern); 941 u32 num_pkts, u64 pattern);
928extern int be_cmd_ddr_dma_test(struct be_adapter *adapter, u64 pattern, 942extern int be_cmd_ddr_dma_test(struct be_adapter *adapter, u64 pattern,
929 u32 byte_cnt, struct be_dma_mem *cmd); 943 u32 byte_cnt, struct be_dma_mem *cmd);
944extern int be_cmd_get_seeprom_data(struct be_adapter *adapter,
945 struct be_dma_mem *nonemb_cmd);
930extern int be_cmd_set_loopback(struct be_adapter *adapter, u8 port_num, 946extern int be_cmd_set_loopback(struct be_adapter *adapter, u8 port_num,
931 u8 loopback_type, u8 enable); 947 u8 loopback_type, u8 enable);
948
diff --git a/drivers/net/benet/be_ethtool.c b/drivers/net/benet/be_ethtool.c
index 5d001c4deac..09d8899b2de 100644
--- a/drivers/net/benet/be_ethtool.c
+++ b/drivers/net/benet/be_ethtool.c
@@ -112,6 +112,7 @@ static const char et_self_tests[][ETH_GSTRING_LEN] = {
112 "PHY Loopback test", 112 "PHY Loopback test",
113 "External Loopback test", 113 "External Loopback test",
114 "DDR DMA test" 114 "DDR DMA test"
115 "Link test"
115}; 116};
116 117
117#define ETHTOOL_TESTS_NUM ARRAY_SIZE(et_self_tests) 118#define ETHTOOL_TESTS_NUM ARRAY_SIZE(et_self_tests)
@@ -529,6 +530,9 @@ static void
529be_self_test(struct net_device *netdev, struct ethtool_test *test, u64 *data) 530be_self_test(struct net_device *netdev, struct ethtool_test *test, u64 *data)
530{ 531{
531 struct be_adapter *adapter = netdev_priv(netdev); 532 struct be_adapter *adapter = netdev_priv(netdev);
533 bool link_up;
534 u8 mac_speed = 0;
535 u16 qos_link_speed = 0;
532 536
533 memset(data, 0, sizeof(u64) * ETHTOOL_TESTS_NUM); 537 memset(data, 0, sizeof(u64) * ETHTOOL_TESTS_NUM);
534 538
@@ -545,12 +549,20 @@ be_self_test(struct net_device *netdev, struct ethtool_test *test, u64 *data)
545 &data[2]) != 0) { 549 &data[2]) != 0) {
546 test->flags |= ETH_TEST_FL_FAILED; 550 test->flags |= ETH_TEST_FL_FAILED;
547 } 551 }
552 }
548 553
549 data[3] = be_test_ddr_dma(adapter); 554 if (be_test_ddr_dma(adapter) != 0) {
550 if (data[3] != 0) 555 data[3] = 1;
551 test->flags |= ETH_TEST_FL_FAILED; 556 test->flags |= ETH_TEST_FL_FAILED;
552 } 557 }
553 558
559 if (be_cmd_link_status_query(adapter, &link_up, &mac_speed,
560 &qos_link_speed) != 0) {
561 test->flags |= ETH_TEST_FL_FAILED;
562 data[4] = -1;
563 } else if (mac_speed) {
564 data[4] = 1;
565 }
554} 566}
555 567
556static int 568static int
@@ -567,12 +579,57 @@ be_do_flash(struct net_device *netdev, struct ethtool_flash *efl)
567 return be_load_fw(adapter, file_name); 579 return be_load_fw(adapter, file_name);
568} 580}
569 581
582static int
583be_get_eeprom_len(struct net_device *netdev)
584{
585 return BE_READ_SEEPROM_LEN;
586}
587
588static int
589be_read_eeprom(struct net_device *netdev, struct ethtool_eeprom *eeprom,
590 uint8_t *data)
591{
592 struct be_adapter *adapter = netdev_priv(netdev);
593 struct be_dma_mem eeprom_cmd;
594 struct be_cmd_resp_seeprom_read *resp;
595 int status;
596
597 if (!eeprom->len)
598 return -EINVAL;
599
600 eeprom->magic = BE_VENDOR_ID | (adapter->pdev->device<<16);
601
602 memset(&eeprom_cmd, 0, sizeof(struct be_dma_mem));
603 eeprom_cmd.size = sizeof(struct be_cmd_req_seeprom_read);
604 eeprom_cmd.va = pci_alloc_consistent(adapter->pdev, eeprom_cmd.size,
605 &eeprom_cmd.dma);
606
607 if (!eeprom_cmd.va) {
608 dev_err(&adapter->pdev->dev,
609 "Memory allocation failure. Could not read eeprom\n");
610 return -ENOMEM;
611 }
612
613 status = be_cmd_get_seeprom_data(adapter, &eeprom_cmd);
614
615 if (!status) {
616 resp = (struct be_cmd_resp_seeprom_read *) eeprom_cmd.va;
617 memcpy(data, resp->seeprom_data, eeprom->len);
618 }
619 pci_free_consistent(adapter->pdev, eeprom_cmd.size, eeprom_cmd.va,
620 eeprom_cmd.dma);
621
622 return status;
623}
624
570const struct ethtool_ops be_ethtool_ops = { 625const struct ethtool_ops be_ethtool_ops = {
571 .get_settings = be_get_settings, 626 .get_settings = be_get_settings,
572 .get_drvinfo = be_get_drvinfo, 627 .get_drvinfo = be_get_drvinfo,
573 .get_wol = be_get_wol, 628 .get_wol = be_get_wol,
574 .set_wol = be_set_wol, 629 .set_wol = be_set_wol,
575 .get_link = ethtool_op_get_link, 630 .get_link = ethtool_op_get_link,
631 .get_eeprom_len = be_get_eeprom_len,
632 .get_eeprom = be_read_eeprom,
576 .get_coalesce = be_get_coalesce, 633 .get_coalesce = be_get_coalesce,
577 .set_coalesce = be_set_coalesce, 634 .set_coalesce = be_set_coalesce,
578 .get_ringparam = be_get_ringparam, 635 .get_ringparam = be_get_ringparam,
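With .get_eeprom_len and .get_eeprom wired up, the ethtool core sizes a kernel buffer from get_eeprom_len() and hands it to get_eeprom() together with the user-requested offset and length; user space then reads the SEEPROM via `ethtool -e ethX`. A simplified sketch of that calling convention (an approximation of the core's behaviour, not its exact code):

/* Sketch: how the ethtool core is expected to drive the two callbacks. */
static int eeprom_read_sketch(struct net_device *dev,
			      struct ethtool_eeprom *eeprom, u8 *buf)
{
	const struct ethtool_ops *ops = dev->ethtool_ops;

	if (!ops->get_eeprom || !ops->get_eeprom_len)
		return -EOPNOTSUPP;
	if (eeprom->offset + eeprom->len > ops->get_eeprom_len(dev))
		return -EINVAL;
	return ops->get_eeprom(dev, eeprom, buf);	/* be_read_eeprom() here */
}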
diff --git a/drivers/net/bnx2.c b/drivers/net/bnx2.c
index 65df1de447e..5917b941aca 100644
--- a/drivers/net/bnx2.c
+++ b/drivers/net/bnx2.c
@@ -48,7 +48,6 @@
48#include <linux/cache.h> 48#include <linux/cache.h>
49#include <linux/firmware.h> 49#include <linux/firmware.h>
50#include <linux/log2.h> 50#include <linux/log2.h>
51#include <linux/list.h>
52 51
53#if defined(CONFIG_CNIC) || defined(CONFIG_CNIC_MODULE) 52#if defined(CONFIG_CNIC) || defined(CONFIG_CNIC_MODULE)
54#define BCM_CNIC 1 53#define BCM_CNIC 1
@@ -3579,14 +3578,14 @@ bnx2_set_rx_mode(struct net_device *dev)
3579 sort_mode |= BNX2_RPM_SORT_USER0_MC_HSH_EN; 3578 sort_mode |= BNX2_RPM_SORT_USER0_MC_HSH_EN;
3580 } 3579 }
3581 3580
3582 if (dev->uc.count > BNX2_MAX_UNICAST_ADDRESSES) { 3581 if (netdev_uc_count(dev) > BNX2_MAX_UNICAST_ADDRESSES) {
3583 rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS; 3582 rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
3584 sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN | 3583 sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
3585 BNX2_RPM_SORT_USER0_PROM_VLAN; 3584 BNX2_RPM_SORT_USER0_PROM_VLAN;
3586 } else if (!(dev->flags & IFF_PROMISC)) { 3585 } else if (!(dev->flags & IFF_PROMISC)) {
 3587 /* Add all entries into the match filter list */ 3586 /* Add all entries into the match filter list */
3588 i = 0; 3587 i = 0;
3589 list_for_each_entry(ha, &dev->uc.list, list) { 3588 netdev_for_each_uc_addr(ha, dev) {
3590 bnx2_set_mac_addr(bp, ha->addr, 3589 bnx2_set_mac_addr(bp, ha->addr,
3591 i + BNX2_START_UNICAST_ADDRESS_INDEX); 3590 i + BNX2_START_UNICAST_ADDRESS_INDEX);
3592 sort_mode |= (1 << 3591 sort_mode |= (1 <<
@@ -6145,6 +6144,10 @@ bnx2_enable_msix(struct bnx2 *bp, int msix_vecs)
6145 REG_WR(bp, BNX2_PCI_MSIX_TBL_OFF_BIR, BNX2_PCI_GRC_WINDOW2_BASE); 6144 REG_WR(bp, BNX2_PCI_MSIX_TBL_OFF_BIR, BNX2_PCI_GRC_WINDOW2_BASE);
6146 REG_WR(bp, BNX2_PCI_MSIX_PBA_OFF_BIT, BNX2_PCI_GRC_WINDOW3_BASE); 6145 REG_WR(bp, BNX2_PCI_MSIX_PBA_OFF_BIT, BNX2_PCI_GRC_WINDOW3_BASE);
6147 6146
6147 /* Need to flush the previous three writes to ensure MSI-X
 6148 * is set up properly */
6149 REG_RD(bp, BNX2_PCI_MSIX_CONTROL);
6150
6148 for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) { 6151 for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
6149 msix_ent[i].entry = i; 6152 msix_ent[i].entry = i;
6150 msix_ent[i].vector = 0; 6153 msix_ent[i].vector = 0;
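The REG_RD() added above is the standard posted-write flush: MMIO writes may sit in buffers until a read from the same device forces them out, so reading a register back guarantees the three GRC window writes have reached the chip before the MSI-X table is used. In generic form (illustrative only, not bnx2-specific):

/* Sketch: flush posted MMIO writes by reading back from the device. */
static void program_and_flush(void __iomem *base)
{
	writel(0x1, base + 0x0);	/* posted write, may be buffered */
	writel(0x2, base + 0x4);	/* posted write, may be buffered */
	(void)readl(base + 0x0);	/* read forces prior writes to complete */
}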
@@ -6227,6 +6230,8 @@ bnx2_open(struct net_device *dev)
6227 6230
6228 atomic_set(&bp->intr_sem, 0); 6231 atomic_set(&bp->intr_sem, 0);
6229 6232
6233 memset(bp->temp_stats_blk, 0, sizeof(struct statistics_block));
6234
6230 bnx2_enable_int(bp); 6235 bnx2_enable_int(bp);
6231 6236
6232 if (bp->flags & BNX2_FLAG_USING_MSI) { 6237 if (bp->flags & BNX2_FLAG_USING_MSI) {
@@ -6538,92 +6543,121 @@ bnx2_close(struct net_device *dev)
6538 return 0; 6543 return 0;
6539} 6544}
6540 6545
6541#define GET_NET_STATS64(ctr) \ 6546static void
6547bnx2_save_stats(struct bnx2 *bp)
6548{
6549 u32 *hw_stats = (u32 *) bp->stats_blk;
6550 u32 *temp_stats = (u32 *) bp->temp_stats_blk;
6551 int i;
6552
6553 /* The 1st 10 counters are 64-bit counters */
6554 for (i = 0; i < 20; i += 2) {
6555 u32 hi;
6556 u64 lo;
6557
6558 hi = *(temp_stats + i) + *(hw_stats + i);
6559 lo = *(temp_stats + i + 1) + *(hw_stats + i + 1);
6560 if (lo > 0xffffffff)
6561 hi++;
6562 *(temp_stats + i) = hi;
6563 *(temp_stats + i + 1) = lo & 0xffffffff;
6564 }
6565
6566 for ( ; i < sizeof(struct statistics_block) / 4; i++)
6567 *(temp_stats + i) = *(temp_stats + i) + *(hw_stats + i);
6568}
6569
6570#define GET_64BIT_NET_STATS64(ctr) \
6542 (unsigned long) ((unsigned long) (ctr##_hi) << 32) + \ 6571 (unsigned long) ((unsigned long) (ctr##_hi) << 32) + \
6543 (unsigned long) (ctr##_lo) 6572 (unsigned long) (ctr##_lo)
6544 6573
6545#define GET_NET_STATS32(ctr) \ 6574#define GET_64BIT_NET_STATS32(ctr) \
6546 (ctr##_lo) 6575 (ctr##_lo)
6547 6576
6548#if (BITS_PER_LONG == 64) 6577#if (BITS_PER_LONG == 64)
6549#define GET_NET_STATS GET_NET_STATS64 6578#define GET_64BIT_NET_STATS(ctr) \
6579 GET_64BIT_NET_STATS64(bp->stats_blk->ctr) + \
6580 GET_64BIT_NET_STATS64(bp->temp_stats_blk->ctr)
6550#else 6581#else
6551#define GET_NET_STATS GET_NET_STATS32 6582#define GET_64BIT_NET_STATS(ctr) \
6583 GET_64BIT_NET_STATS32(bp->stats_blk->ctr) + \
6584 GET_64BIT_NET_STATS32(bp->temp_stats_blk->ctr)
6552#endif 6585#endif
6553 6586
6587#define GET_32BIT_NET_STATS(ctr) \
6588 (unsigned long) (bp->stats_blk->ctr + \
6589 bp->temp_stats_blk->ctr)
6590
6554static struct net_device_stats * 6591static struct net_device_stats *
6555bnx2_get_stats(struct net_device *dev) 6592bnx2_get_stats(struct net_device *dev)
6556{ 6593{
6557 struct bnx2 *bp = netdev_priv(dev); 6594 struct bnx2 *bp = netdev_priv(dev);
6558 struct statistics_block *stats_blk = bp->stats_blk;
6559 struct net_device_stats *net_stats = &dev->stats; 6595 struct net_device_stats *net_stats = &dev->stats;
6560 6596
6561 if (bp->stats_blk == NULL) { 6597 if (bp->stats_blk == NULL) {
6562 return net_stats; 6598 return net_stats;
6563 } 6599 }
6564 net_stats->rx_packets = 6600 net_stats->rx_packets =
6565 GET_NET_STATS(stats_blk->stat_IfHCInUcastPkts) + 6601 GET_64BIT_NET_STATS(stat_IfHCInUcastPkts) +
6566 GET_NET_STATS(stats_blk->stat_IfHCInMulticastPkts) + 6602 GET_64BIT_NET_STATS(stat_IfHCInMulticastPkts) +
6567 GET_NET_STATS(stats_blk->stat_IfHCInBroadcastPkts); 6603 GET_64BIT_NET_STATS(stat_IfHCInBroadcastPkts);
6568 6604
6569 net_stats->tx_packets = 6605 net_stats->tx_packets =
6570 GET_NET_STATS(stats_blk->stat_IfHCOutUcastPkts) + 6606 GET_64BIT_NET_STATS(stat_IfHCOutUcastPkts) +
6571 GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts) + 6607 GET_64BIT_NET_STATS(stat_IfHCOutMulticastPkts) +
6572 GET_NET_STATS(stats_blk->stat_IfHCOutBroadcastPkts); 6608 GET_64BIT_NET_STATS(stat_IfHCOutBroadcastPkts);
6573 6609
6574 net_stats->rx_bytes = 6610 net_stats->rx_bytes =
6575 GET_NET_STATS(stats_blk->stat_IfHCInOctets); 6611 GET_64BIT_NET_STATS(stat_IfHCInOctets);
6576 6612
6577 net_stats->tx_bytes = 6613 net_stats->tx_bytes =
6578 GET_NET_STATS(stats_blk->stat_IfHCOutOctets); 6614 GET_64BIT_NET_STATS(stat_IfHCOutOctets);
6579 6615
6580 net_stats->multicast = 6616 net_stats->multicast =
6581 GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts); 6617 GET_64BIT_NET_STATS(stat_IfHCOutMulticastPkts);
6582 6618
6583 net_stats->collisions = 6619 net_stats->collisions =
6584 (unsigned long) stats_blk->stat_EtherStatsCollisions; 6620 GET_32BIT_NET_STATS(stat_EtherStatsCollisions);
6585 6621
6586 net_stats->rx_length_errors = 6622 net_stats->rx_length_errors =
6587 (unsigned long) (stats_blk->stat_EtherStatsUndersizePkts + 6623 GET_32BIT_NET_STATS(stat_EtherStatsUndersizePkts) +
6588 stats_blk->stat_EtherStatsOverrsizePkts); 6624 GET_32BIT_NET_STATS(stat_EtherStatsOverrsizePkts);
6589 6625
6590 net_stats->rx_over_errors = 6626 net_stats->rx_over_errors =
6591 (unsigned long) (stats_blk->stat_IfInFTQDiscards + 6627 GET_32BIT_NET_STATS(stat_IfInFTQDiscards) +
6592 stats_blk->stat_IfInMBUFDiscards); 6628 GET_32BIT_NET_STATS(stat_IfInMBUFDiscards);
6593 6629
6594 net_stats->rx_frame_errors = 6630 net_stats->rx_frame_errors =
6595 (unsigned long) stats_blk->stat_Dot3StatsAlignmentErrors; 6631 GET_32BIT_NET_STATS(stat_Dot3StatsAlignmentErrors);
6596 6632
6597 net_stats->rx_crc_errors = 6633 net_stats->rx_crc_errors =
6598 (unsigned long) stats_blk->stat_Dot3StatsFCSErrors; 6634 GET_32BIT_NET_STATS(stat_Dot3StatsFCSErrors);
6599 6635
6600 net_stats->rx_errors = net_stats->rx_length_errors + 6636 net_stats->rx_errors = net_stats->rx_length_errors +
6601 net_stats->rx_over_errors + net_stats->rx_frame_errors + 6637 net_stats->rx_over_errors + net_stats->rx_frame_errors +
6602 net_stats->rx_crc_errors; 6638 net_stats->rx_crc_errors;
6603 6639
6604 net_stats->tx_aborted_errors = 6640 net_stats->tx_aborted_errors =
6605 (unsigned long) (stats_blk->stat_Dot3StatsExcessiveCollisions + 6641 GET_32BIT_NET_STATS(stat_Dot3StatsExcessiveCollisions) +
6606 stats_blk->stat_Dot3StatsLateCollisions); 6642 GET_32BIT_NET_STATS(stat_Dot3StatsLateCollisions);
6607 6643
6608 if ((CHIP_NUM(bp) == CHIP_NUM_5706) || 6644 if ((CHIP_NUM(bp) == CHIP_NUM_5706) ||
6609 (CHIP_ID(bp) == CHIP_ID_5708_A0)) 6645 (CHIP_ID(bp) == CHIP_ID_5708_A0))
6610 net_stats->tx_carrier_errors = 0; 6646 net_stats->tx_carrier_errors = 0;
6611 else { 6647 else {
6612 net_stats->tx_carrier_errors = 6648 net_stats->tx_carrier_errors =
6613 (unsigned long) 6649 GET_32BIT_NET_STATS(stat_Dot3StatsCarrierSenseErrors);
6614 stats_blk->stat_Dot3StatsCarrierSenseErrors;
6615 } 6650 }
6616 6651
6617 net_stats->tx_errors = 6652 net_stats->tx_errors =
6618 (unsigned long) 6653 GET_32BIT_NET_STATS(stat_emac_tx_stat_dot3statsinternalmactransmiterrors) +
6619 stats_blk->stat_emac_tx_stat_dot3statsinternalmactransmiterrors
6620 +
6621 net_stats->tx_aborted_errors + 6654 net_stats->tx_aborted_errors +
6622 net_stats->tx_carrier_errors; 6655 net_stats->tx_carrier_errors;
6623 6656
6624 net_stats->rx_missed_errors = 6657 net_stats->rx_missed_errors =
6625 (unsigned long) (stats_blk->stat_IfInFTQDiscards + 6658 GET_32BIT_NET_STATS(stat_IfInFTQDiscards) +
6626 stats_blk->stat_IfInMBUFDiscards + stats_blk->stat_FwRxDrop); 6659 GET_32BIT_NET_STATS(stat_IfInMBUFDiscards) +
6660 GET_32BIT_NET_STATS(stat_FwRxDrop);
6627 6661
6628 return net_stats; 6662 return net_stats;
6629} 6663}
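bnx2_save_stats() treats the first ten counters as {hi, lo} pairs of 32-bit words and propagates the carry out of the low word by hand, so accumulated values survive a chip reset. The same arithmetic in isolation (a sketch under the assumption, implied by the loop above, that those counters really are laid out as consecutive u32 pairs):

/* Sketch: accumulate one 64-bit counter stored as two 32-bit words. */
static void add_hi_lo(u32 *acc_hi, u32 *acc_lo, u32 hw_hi, u32 hw_lo)
{
	u64 lo = (u64)*acc_lo + hw_lo;		/* may exceed 32 bits */

	*acc_hi += hw_hi + (lo > 0xffffffff);	/* carry into the high word */
	*acc_lo = (u32)lo;			/* keep the low 32 bits */
}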
@@ -7083,6 +7117,9 @@ static int
7083bnx2_change_ring_size(struct bnx2 *bp, u32 rx, u32 tx) 7117bnx2_change_ring_size(struct bnx2 *bp, u32 rx, u32 tx)
7084{ 7118{
7085 if (netif_running(bp->dev)) { 7119 if (netif_running(bp->dev)) {
7120 /* Reset will erase chipset stats; save them */
7121 bnx2_save_stats(bp);
7122
7086 bnx2_netif_stop(bp); 7123 bnx2_netif_stop(bp);
7087 bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET); 7124 bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
7088 bnx2_free_skbs(bp); 7125 bnx2_free_skbs(bp);
@@ -7427,6 +7464,7 @@ bnx2_get_ethtool_stats(struct net_device *dev,
7427 struct bnx2 *bp = netdev_priv(dev); 7464 struct bnx2 *bp = netdev_priv(dev);
7428 int i; 7465 int i;
7429 u32 *hw_stats = (u32 *) bp->stats_blk; 7466 u32 *hw_stats = (u32 *) bp->stats_blk;
7467 u32 *temp_stats = (u32 *) bp->temp_stats_blk;
7430 u8 *stats_len_arr = NULL; 7468 u8 *stats_len_arr = NULL;
7431 7469
7432 if (hw_stats == NULL) { 7470 if (hw_stats == NULL) {
@@ -7443,21 +7481,26 @@ bnx2_get_ethtool_stats(struct net_device *dev,
7443 stats_len_arr = bnx2_5708_stats_len_arr; 7481 stats_len_arr = bnx2_5708_stats_len_arr;
7444 7482
7445 for (i = 0; i < BNX2_NUM_STATS; i++) { 7483 for (i = 0; i < BNX2_NUM_STATS; i++) {
7484 unsigned long offset;
7485
7446 if (stats_len_arr[i] == 0) { 7486 if (stats_len_arr[i] == 0) {
7447 /* skip this counter */ 7487 /* skip this counter */
7448 buf[i] = 0; 7488 buf[i] = 0;
7449 continue; 7489 continue;
7450 } 7490 }
7491
7492 offset = bnx2_stats_offset_arr[i];
7451 if (stats_len_arr[i] == 4) { 7493 if (stats_len_arr[i] == 4) {
7452 /* 4-byte counter */ 7494 /* 4-byte counter */
7453 buf[i] = (u64) 7495 buf[i] = (u64) *(hw_stats + offset) +
7454 *(hw_stats + bnx2_stats_offset_arr[i]); 7496 *(temp_stats + offset);
7455 continue; 7497 continue;
7456 } 7498 }
7457 /* 8-byte counter */ 7499 /* 8-byte counter */
7458 buf[i] = (((u64) *(hw_stats + 7500 buf[i] = (((u64) *(hw_stats + offset)) << 32) +
7459 bnx2_stats_offset_arr[i])) << 32) + 7501 *(hw_stats + offset + 1) +
7460 *(hw_stats + bnx2_stats_offset_arr[i] + 1); 7502 (((u64) *(temp_stats + offset)) << 32) +
7503 *(temp_stats + offset + 1);
7461 } 7504 }
7462} 7505}
7463 7506
@@ -7625,7 +7668,7 @@ bnx2_change_mtu(struct net_device *dev, int new_mtu)
7625 return (bnx2_change_ring_size(bp, bp->rx_ring_size, bp->tx_ring_size)); 7668 return (bnx2_change_ring_size(bp, bp->rx_ring_size, bp->tx_ring_size));
7626} 7669}
7627 7670
7628#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER) 7671#ifdef CONFIG_NET_POLL_CONTROLLER
7629static void 7672static void
7630poll_bnx2(struct net_device *dev) 7673poll_bnx2(struct net_device *dev)
7631{ 7674{
@@ -7825,6 +7868,14 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
7825 bp->flags = 0; 7868 bp->flags = 0;
7826 bp->phy_flags = 0; 7869 bp->phy_flags = 0;
7827 7870
7871 bp->temp_stats_blk =
7872 kzalloc(sizeof(struct statistics_block), GFP_KERNEL);
7873
7874 if (bp->temp_stats_blk == NULL) {
7875 rc = -ENOMEM;
7876 goto err_out;
7877 }
7878
7828 /* enable device (incl. PCI PM wakeup), and bus-mastering */ 7879 /* enable device (incl. PCI PM wakeup), and bus-mastering */
7829 rc = pci_enable_device(pdev); 7880 rc = pci_enable_device(pdev);
7830 if (rc) { 7881 if (rc) {
@@ -8229,7 +8280,7 @@ static const struct net_device_ops bnx2_netdev_ops = {
8229#ifdef BCM_VLAN 8280#ifdef BCM_VLAN
8230 .ndo_vlan_rx_register = bnx2_vlan_rx_register, 8281 .ndo_vlan_rx_register = bnx2_vlan_rx_register,
8231#endif 8282#endif
8232#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER) 8283#ifdef CONFIG_NET_POLL_CONTROLLER
8233 .ndo_poll_controller = poll_bnx2, 8284 .ndo_poll_controller = poll_bnx2,
8234#endif 8285#endif
8235}; 8286};
@@ -8346,6 +8397,8 @@ bnx2_remove_one(struct pci_dev *pdev)
8346 if (bp->regview) 8397 if (bp->regview)
8347 iounmap(bp->regview); 8398 iounmap(bp->regview);
8348 8399
8400 kfree(bp->temp_stats_blk);
8401
8349 free_netdev(dev); 8402 free_netdev(dev);
8350 pci_release_regions(pdev); 8403 pci_release_regions(pdev);
8351 pci_disable_device(pdev); 8404 pci_disable_device(pdev);
diff --git a/drivers/net/bnx2.h b/drivers/net/bnx2.h
index 939dc44d50a..b860fbbff35 100644
--- a/drivers/net/bnx2.h
+++ b/drivers/net/bnx2.h
@@ -6851,6 +6851,7 @@ struct bnx2 {
6851 dma_addr_t status_blk_mapping; 6851 dma_addr_t status_blk_mapping;
6852 6852
6853 struct statistics_block *stats_blk; 6853 struct statistics_block *stats_blk;
6854 struct statistics_block *temp_stats_blk;
6854 dma_addr_t stats_blk_mapping; 6855 dma_addr_t stats_blk_mapping;
6855 6856
6856 int ctx_pages; 6857 int ctx_pages;
diff --git a/drivers/net/bnx2x_main.c b/drivers/net/bnx2x_main.c
index 306c2b8165e..ffc7381969a 100644
--- a/drivers/net/bnx2x_main.c
+++ b/drivers/net/bnx2x_main.c
@@ -140,7 +140,7 @@ static struct {
140}; 140};
141 141
142 142
143static const struct pci_device_id bnx2x_pci_tbl[] = { 143static DEFINE_PCI_DEVICE_TABLE(bnx2x_pci_tbl) = {
144 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57710), BCM57710 }, 144 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57710), BCM57710 },
145 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711), BCM57711 }, 145 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711), BCM57711 },
146 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711E), BCM57711E }, 146 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711E), BCM57711E },
@@ -11731,7 +11731,7 @@ static void bnx2x_vlan_rx_register(struct net_device *dev,
11731 11731
11732#endif 11732#endif
11733 11733
11734#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER) 11734#ifdef CONFIG_NET_POLL_CONTROLLER
11735static void poll_bnx2x(struct net_device *dev) 11735static void poll_bnx2x(struct net_device *dev)
11736{ 11736{
11737 struct bnx2x *bp = netdev_priv(dev); 11737 struct bnx2x *bp = netdev_priv(dev);
@@ -11755,7 +11755,7 @@ static const struct net_device_ops bnx2x_netdev_ops = {
11755#ifdef BCM_VLAN 11755#ifdef BCM_VLAN
11756 .ndo_vlan_rx_register = bnx2x_vlan_rx_register, 11756 .ndo_vlan_rx_register = bnx2x_vlan_rx_register,
11757#endif 11757#endif
11758#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER) 11758#ifdef CONFIG_NET_POLL_CONTROLLER
11759 .ndo_poll_controller = poll_bnx2x, 11759 .ndo_poll_controller = poll_bnx2x,
11760#endif 11760#endif
11761}; 11761};
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index efa0e41bf3e..6221936e957 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -2615,6 +2615,17 @@ static int bond_arp_rcv(struct sk_buff *skb, struct net_device *dev, struct pack
2615 unsigned char *arp_ptr; 2615 unsigned char *arp_ptr;
2616 __be32 sip, tip; 2616 __be32 sip, tip;
2617 2617
2618 if (dev->priv_flags & IFF_802_1Q_VLAN) {
2619 /*

 2620 * When using VLANs and bonding, dev and orig_dev may be
2621 * incorrect if the physical interface supports VLAN
2622 * acceleration. With this change ARP validation now
2623 * works for hosts only reachable on the VLAN interface.
2624 */
2625 dev = vlan_dev_real_dev(dev);
 2626 orig_dev = dev_get_by_index_rcu(dev_net(skb->dev), skb->skb_iif);
2627 }
2628
2618 if (!(dev->priv_flags & IFF_BONDING) || !(dev->flags & IFF_MASTER)) 2629 if (!(dev->priv_flags & IFF_BONDING) || !(dev->flags & IFF_MASTER))
2619 goto out; 2630 goto out;
2620 2631
@@ -3296,7 +3307,7 @@ static void bond_remove_proc_entry(struct bonding *bond)
3296/* Create the bonding directory under /proc/net, if doesn't exist yet. 3307/* Create the bonding directory under /proc/net, if doesn't exist yet.
3297 * Caller must hold rtnl_lock. 3308 * Caller must hold rtnl_lock.
3298 */ 3309 */
3299static void bond_create_proc_dir(struct bond_net *bn) 3310static void __net_init bond_create_proc_dir(struct bond_net *bn)
3300{ 3311{
3301 if (!bn->proc_dir) { 3312 if (!bn->proc_dir) {
3302 bn->proc_dir = proc_mkdir(DRV_NAME, bn->net->proc_net); 3313 bn->proc_dir = proc_mkdir(DRV_NAME, bn->net->proc_net);
@@ -3309,7 +3320,7 @@ static void bond_create_proc_dir(struct bond_net *bn)
3309/* Destroy the bonding directory under /proc/net, if empty. 3320/* Destroy the bonding directory under /proc/net, if empty.
3310 * Caller must hold rtnl_lock. 3321 * Caller must hold rtnl_lock.
3311 */ 3322 */
3312static void bond_destroy_proc_dir(struct bond_net *bn) 3323static void __net_exit bond_destroy_proc_dir(struct bond_net *bn)
3313{ 3324{
3314 if (bn->proc_dir) { 3325 if (bn->proc_dir) {
3315 remove_proc_entry(DRV_NAME, bn->net->proc_net); 3326 remove_proc_entry(DRV_NAME, bn->net->proc_net);
@@ -3327,11 +3338,11 @@ static void bond_remove_proc_entry(struct bonding *bond)
3327{ 3338{
3328} 3339}
3329 3340
3330static void bond_create_proc_dir(struct bond_net *bn) 3341static inline void bond_create_proc_dir(struct bond_net *bn)
3331{ 3342{
3332} 3343}
3333 3344
3334static void bond_destroy_proc_dir(struct bond_net *bn) 3345static inline void bond_destroy_proc_dir(struct bond_net *bn)
3335{ 3346{
3336} 3347}
3337 3348
@@ -4944,7 +4955,7 @@ out_netdev:
4944 goto out; 4955 goto out;
4945} 4956}
4946 4957
4947static int bond_net_init(struct net *net) 4958static int __net_init bond_net_init(struct net *net)
4948{ 4959{
4949 struct bond_net *bn = net_generic(net, bond_net_id); 4960 struct bond_net *bn = net_generic(net, bond_net_id);
4950 4961
@@ -4956,7 +4967,7 @@ static int bond_net_init(struct net *net)
4956 return 0; 4967 return 0;
4957} 4968}
4958 4969
4959static void bond_net_exit(struct net *net) 4970static void __net_exit bond_net_exit(struct net *net)
4960{ 4971{
4961 struct bond_net *bn = net_generic(net, bond_net_id); 4972 struct bond_net *bn = net_generic(net, bond_net_id);
4962 4973
diff --git a/drivers/net/can/at91_can.c b/drivers/net/can/at91_can.c
index 166cc7e579c..a2f29a38798 100644
--- a/drivers/net/can/at91_can.c
+++ b/drivers/net/can/at91_can.c
@@ -342,6 +342,9 @@ static netdev_tx_t at91_start_xmit(struct sk_buff *skb, struct net_device *dev)
342 unsigned int mb, prio; 342 unsigned int mb, prio;
343 u32 reg_mid, reg_mcr; 343 u32 reg_mid, reg_mcr;
344 344
345 if (can_dropped_invalid_skb(dev, skb))
346 return NETDEV_TX_OK;
347
345 mb = get_tx_next_mb(priv); 348 mb = get_tx_next_mb(priv);
346 prio = get_tx_next_prio(priv); 349 prio = get_tx_next_prio(priv);
347 350
@@ -1070,6 +1073,7 @@ static int __init at91_can_probe(struct platform_device *pdev)
1070 priv->can.bittiming_const = &at91_bittiming_const; 1073 priv->can.bittiming_const = &at91_bittiming_const;
1071 priv->can.do_set_bittiming = at91_set_bittiming; 1074 priv->can.do_set_bittiming = at91_set_bittiming;
1072 priv->can.do_set_mode = at91_set_mode; 1075 priv->can.do_set_mode = at91_set_mode;
1076 priv->can.ctrlmode_supported = CAN_CTRLMODE_3_SAMPLES;
1073 priv->reg_base = addr; 1077 priv->reg_base = addr;
1074 priv->dev = dev; 1078 priv->dev = dev;
1075 priv->clk = clk; 1079 priv->clk = clk;
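at91, bfin_can, mcp251x and mscan all switch to can_dropped_invalid_skb() from <linux/can/dev.h> instead of open-coded length checks. Roughly, the helper validates the skb, drops it and accounts it as tx_dropped when malformed, and tells .ndo_start_xmit to return NETDEV_TX_OK (a sketch of its behaviour, not a verbatim copy of the helper):

/* Sketch: drop skbs that are not a well-formed struct can_frame. */
static inline int dropped_invalid_skb_sketch(struct net_device *dev,
					     struct sk_buff *skb)
{
	const struct can_frame *cf = (struct can_frame *)skb->data;

	if (unlikely(skb->len != sizeof(*cf) || cf->can_dlc > 8)) {
		kfree_skb(skb);
		dev->stats.tx_dropped++;
		return 1;	/* caller returns NETDEV_TX_OK */
	}
	return 0;		/* frame is valid, keep transmitting */
}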
diff --git a/drivers/net/can/bfin_can.c b/drivers/net/can/bfin_can.c
index 0ec1524523c..bf7f9ba2d90 100644
--- a/drivers/net/can/bfin_can.c
+++ b/drivers/net/can/bfin_can.c
@@ -318,6 +318,9 @@ static int bfin_can_start_xmit(struct sk_buff *skb, struct net_device *dev)
318 u16 val; 318 u16 val;
319 int i; 319 int i;
320 320
321 if (can_dropped_invalid_skb(dev, skb))
322 return NETDEV_TX_OK;
323
321 netif_stop_queue(dev); 324 netif_stop_queue(dev);
322 325
323 /* fill id */ 326 /* fill id */
@@ -600,6 +603,7 @@ struct net_device *alloc_bfin_candev(void)
600 priv->can.bittiming_const = &bfin_can_bittiming_const; 603 priv->can.bittiming_const = &bfin_can_bittiming_const;
601 priv->can.do_set_bittiming = bfin_can_set_bittiming; 604 priv->can.do_set_bittiming = bfin_can_set_bittiming;
602 priv->can.do_set_mode = bfin_can_set_mode; 605 priv->can.do_set_mode = bfin_can_set_mode;
606 priv->can.ctrlmode_supported = CAN_CTRLMODE_3_SAMPLES;
603 607
604 return dev; 608 return dev;
605} 609}
diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c
index c1bb29f0322..f08f1202ff0 100644
--- a/drivers/net/can/dev.c
+++ b/drivers/net/can/dev.c
@@ -592,6 +592,8 @@ static int can_changelink(struct net_device *dev,
592 if (dev->flags & IFF_UP) 592 if (dev->flags & IFF_UP)
593 return -EBUSY; 593 return -EBUSY;
594 cm = nla_data(data[IFLA_CAN_CTRLMODE]); 594 cm = nla_data(data[IFLA_CAN_CTRLMODE]);
595 if (cm->flags & ~priv->ctrlmode_supported)
596 return -EOPNOTSUPP;
595 priv->ctrlmode &= ~cm->mask; 597 priv->ctrlmode &= ~cm->mask;
596 priv->ctrlmode |= cm->flags; 598 priv->ctrlmode |= cm->flags;
597 } 599 }
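The new check in can_changelink() rejects any requested control-mode flag the driver has not advertised in ctrlmode_supported, so for example enabling listen-only mode via netlink now fails with -EOPNOTSUPP on hardware that cannot do it instead of being silently accepted. The bitmask logic in isolation (values chosen only for illustration):

/* Sketch: a request may only contain flags the driver advertised. */
static int check_ctrlmode_sketch(u32 supported, u32 requested)
{
	return (requested & ~supported) ? -EOPNOTSUPP : 0;
}

/* e.g. supported = CAN_CTRLMODE_LOOPBACK | CAN_CTRLMODE_3_SAMPLES and
 * requested = CAN_CTRLMODE_LISTENONLY  ->  -EOPNOTSUPP, as above.     */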
diff --git a/drivers/net/can/mcp251x.c b/drivers/net/can/mcp251x.c
index 1a72ca066a1..bbe186b5a0e 100644
--- a/drivers/net/can/mcp251x.c
+++ b/drivers/net/can/mcp251x.c
@@ -494,12 +494,8 @@ static netdev_tx_t mcp251x_hard_start_xmit(struct sk_buff *skb,
494 return NETDEV_TX_BUSY; 494 return NETDEV_TX_BUSY;
495 } 495 }
496 496
497 if (skb->len != sizeof(struct can_frame)) { 497 if (can_dropped_invalid_skb(net, skb))
498 dev_err(&spi->dev, "dropping packet - bad length\n");
499 dev_kfree_skb(skb);
500 net->stats.tx_dropped++;
501 return NETDEV_TX_OK; 498 return NETDEV_TX_OK;
502 }
503 499
504 netif_stop_queue(net); 500 netif_stop_queue(net);
505 priv->tx_skb = skb; 501 priv->tx_skb = skb;
@@ -543,9 +539,14 @@ static void mcp251x_set_normal_mode(struct spi_device *spi)
543 if (priv->can.ctrlmode & CAN_CTRLMODE_LOOPBACK) { 539 if (priv->can.ctrlmode & CAN_CTRLMODE_LOOPBACK) {
544 /* Put device into loopback mode */ 540 /* Put device into loopback mode */
545 mcp251x_write_reg(spi, CANCTRL, CANCTRL_REQOP_LOOPBACK); 541 mcp251x_write_reg(spi, CANCTRL, CANCTRL_REQOP_LOOPBACK);
542 } else if (priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY) {
543 /* Put device into listen-only mode */
544 mcp251x_write_reg(spi, CANCTRL, CANCTRL_REQOP_LISTEN_ONLY);
546 } else { 545 } else {
547 /* Put device into normal mode */ 546 /* Put device into normal mode */
548 mcp251x_write_reg(spi, CANCTRL, CANCTRL_REQOP_NORMAL); 547 mcp251x_write_reg(spi, CANCTRL, CANCTRL_REQOP_NORMAL |
548 (priv->can.ctrlmode & CAN_CTRLMODE_ONE_SHOT ?
549 CANCTRL_OSM : 0));
549 550
550 /* Wait for the device to enter normal mode */ 551 /* Wait for the device to enter normal mode */
551 timeout = jiffies + HZ; 552 timeout = jiffies + HZ;
@@ -952,6 +953,10 @@ static int __devinit mcp251x_can_probe(struct spi_device *spi)
952 priv->can.bittiming_const = &mcp251x_bittiming_const; 953 priv->can.bittiming_const = &mcp251x_bittiming_const;
953 priv->can.do_set_mode = mcp251x_do_set_mode; 954 priv->can.do_set_mode = mcp251x_do_set_mode;
954 priv->can.clock.freq = pdata->oscillator_frequency / 2; 955 priv->can.clock.freq = pdata->oscillator_frequency / 2;
956 priv->can.ctrlmode_supported = CAN_CTRLMODE_3_SAMPLES |
957 CAN_CTRLMODE_LOOPBACK | CAN_CTRLMODE_LISTENONLY;
958 if (pdata->model == CAN_MCP251X_MCP2515)
959 priv->can.ctrlmode_supported |= CAN_CTRLMODE_ONE_SHOT;
955 priv->net = net; 960 priv->net = net;
956 dev_set_drvdata(&spi->dev, priv); 961 dev_set_drvdata(&spi->dev, priv);
957 962
diff --git a/drivers/net/can/mscan/Kconfig b/drivers/net/can/mscan/Kconfig
index cd0f2d6f375..27d1d398e25 100644
--- a/drivers/net/can/mscan/Kconfig
+++ b/drivers/net/can/mscan/Kconfig
@@ -11,12 +11,13 @@ if CAN_MSCAN
11 11
12config CAN_MPC5XXX 12config CAN_MPC5XXX
13 tristate "Freescale MPC5xxx onboard CAN controller" 13 tristate "Freescale MPC5xxx onboard CAN controller"
14 depends on PPC_MPC52xx 14 depends on (PPC_MPC52xx || PPC_MPC512x)
15 ---help--- 15 ---help---
16 If you say yes here you get support for Freescale's MPC5xxx 16 If you say yes here you get support for Freescale's MPC5xxx
17 onboard CAN controller. 17 onboard CAN controller. Currently, the MPC5200, MPC5200B and
18 MPC5121 (Rev. 2 and later) are supported.
18 19
19 This driver can also be built as a module. If so, the module 20 This driver can also be built as a module. If so, the module
20 will be called mscan-mpc5xxx.ko. 21 will be called mscan-mpc5xxx.ko.
21 22
22endif 23endif
diff --git a/drivers/net/can/mscan/mpc5xxx_can.c b/drivers/net/can/mscan/mpc5xxx_can.c
index 1de6f6349b1..03e7c48465a 100644
--- a/drivers/net/can/mscan/mpc5xxx_can.c
+++ b/drivers/net/can/mscan/mpc5xxx_can.c
@@ -29,6 +29,7 @@
29#include <linux/can/dev.h> 29#include <linux/can/dev.h>
30#include <linux/of_platform.h> 30#include <linux/of_platform.h>
31#include <sysdev/fsl_soc.h> 31#include <sysdev/fsl_soc.h>
32#include <linux/clk.h>
32#include <linux/io.h> 33#include <linux/io.h>
33#include <asm/mpc52xx.h> 34#include <asm/mpc52xx.h>
34 35
@@ -36,22 +37,21 @@
36 37
37#define DRV_NAME "mpc5xxx_can" 38#define DRV_NAME "mpc5xxx_can"
38 39
39static struct of_device_id mpc52xx_cdm_ids[] __devinitdata = { 40struct mpc5xxx_can_data {
41 unsigned int type;
42 u32 (*get_clock)(struct of_device *ofdev, const char *clock_name,
43 int *mscan_clksrc);
44};
45
46#ifdef CONFIG_PPC_MPC52xx
47static struct of_device_id __devinitdata mpc52xx_cdm_ids[] = {
40 { .compatible = "fsl,mpc5200-cdm", }, 48 { .compatible = "fsl,mpc5200-cdm", },
41 {} 49 {}
42}; 50};
43 51
44/* 52static u32 __devinit mpc52xx_can_get_clock(struct of_device *ofdev,
45 * Get frequency of the MSCAN clock source 53 const char *clock_name,
46 * 54 int *mscan_clksrc)
47 * Either the oscillator clock (SYS_XTAL_IN) or the IP bus clock (IP_CLK)
48 * can be selected. According to the MPC5200 user's manual, the oscillator
49 * clock is the better choice as it has less jitter but due to a hardware
50 * bug, it can not be selected for the old MPC5200 Rev. A chips.
51 */
52
53static unsigned int __devinit mpc52xx_can_clock_freq(struct of_device *of,
54 int clock_src)
55{ 55{
56 unsigned int pvr; 56 unsigned int pvr;
57 struct mpc52xx_cdm __iomem *cdm; 57 struct mpc52xx_cdm __iomem *cdm;
@@ -61,21 +61,33 @@ static unsigned int __devinit mpc52xx_can_clock_freq(struct of_device *of,
61 61
62 pvr = mfspr(SPRN_PVR); 62 pvr = mfspr(SPRN_PVR);
63 63
64 freq = mpc5xxx_get_bus_frequency(of->node); 64 /*
65 * Either the oscillator clock (SYS_XTAL_IN) or the IP bus clock
66 * (IP_CLK) can be selected as MSCAN clock source. According to
67 * the MPC5200 user's manual, the oscillator clock is the better
68 * choice as it has less jitter. For this reason, it is selected
69 * by default. Unfortunately, it can not be selected for the old
70 * MPC5200 Rev. A chips due to a hardware bug (check errata).
71 */
72 if (clock_name && strcmp(clock_name, "ip") == 0)
73 *mscan_clksrc = MSCAN_CLKSRC_BUS;
74 else
75 *mscan_clksrc = MSCAN_CLKSRC_XTAL;
76
77 freq = mpc5xxx_get_bus_frequency(ofdev->node);
65 if (!freq) 78 if (!freq)
66 return 0; 79 return 0;
67 80
68 if (clock_src == MSCAN_CLKSRC_BUS || pvr == 0x80822011) 81 if (*mscan_clksrc == MSCAN_CLKSRC_BUS || pvr == 0x80822011)
69 return freq; 82 return freq;
70 83
71 /* Determine SYS_XTAL_IN frequency from the clock domain settings */ 84 /* Determine SYS_XTAL_IN frequency from the clock domain settings */
72 np_cdm = of_find_matching_node(NULL, mpc52xx_cdm_ids); 85 np_cdm = of_find_matching_node(NULL, mpc52xx_cdm_ids);
73 if (!np_cdm) { 86 if (!np_cdm) {
74 dev_err(&of->dev, "can't get clock node!\n"); 87 dev_err(&ofdev->dev, "can't get clock node!\n");
75 return 0; 88 return 0;
76 } 89 }
77 cdm = of_iomap(np_cdm, 0); 90 cdm = of_iomap(np_cdm, 0);
78 of_node_put(np_cdm);
79 91
80 if (in_8(&cdm->ipb_clk_sel) & 0x1) 92 if (in_8(&cdm->ipb_clk_sel) & 0x1)
81 freq *= 2; 93 freq *= 2;
@@ -84,26 +96,174 @@ static unsigned int __devinit mpc52xx_can_clock_freq(struct of_device *of,
84 freq *= (val & (1 << 5)) ? 8 : 4; 96 freq *= (val & (1 << 5)) ? 8 : 4;
85 freq /= (val & (1 << 6)) ? 12 : 16; 97 freq /= (val & (1 << 6)) ? 12 : 16;
86 98
99 of_node_put(np_cdm);
87 iounmap(cdm); 100 iounmap(cdm);
88 101
89 return freq; 102 return freq;
90} 103}
104#else /* !CONFIG_PPC_MPC52xx */
105static u32 __devinit mpc52xx_can_get_clock(struct of_device *ofdev,
106 const char *clock_name,
107 int *mscan_clksrc)
108{
109 return 0;
110}
111#endif /* CONFIG_PPC_MPC52xx */
112
113#ifdef CONFIG_PPC_MPC512x
114struct mpc512x_clockctl {
115 u32 spmr; /* System PLL Mode Reg */
116 u32 sccr[2]; /* System Clk Ctrl Reg 1 & 2 */
117 u32 scfr1; /* System Clk Freq Reg 1 */
118 u32 scfr2; /* System Clk Freq Reg 2 */
119 u32 reserved;
120 u32 bcr; /* Bread Crumb Reg */
121 u32 pccr[12]; /* PSC Clk Ctrl Reg 0-11 */
122 u32 spccr; /* SPDIF Clk Ctrl Reg */
123 u32 cccr; /* CFM Clk Ctrl Reg */
124 u32 dccr; /* DIU Clk Cnfg Reg */
 125 u32 mccr[4]; /* MSCAN Clk Ctrl Reg 1-4 */
126};
127
128static struct of_device_id __devinitdata mpc512x_clock_ids[] = {
129 { .compatible = "fsl,mpc5121-clock", },
130 {}
131};
132
133static u32 __devinit mpc512x_can_get_clock(struct of_device *ofdev,
134 const char *clock_name,
135 int *mscan_clksrc)
136{
137 struct mpc512x_clockctl __iomem *clockctl;
138 struct device_node *np_clock;
139 struct clk *sys_clk, *ref_clk;
140 int plen, clockidx, clocksrc = -1;
141 u32 sys_freq, val, clockdiv = 1, freq = 0;
142 const u32 *pval;
143
144 np_clock = of_find_matching_node(NULL, mpc512x_clock_ids);
145 if (!np_clock) {
146 dev_err(&ofdev->dev, "couldn't find clock node\n");
147 return -ENODEV;
148 }
149 clockctl = of_iomap(np_clock, 0);
150 if (!clockctl) {
151 dev_err(&ofdev->dev, "couldn't map clock registers\n");
152 return 0;
153 }
154
155 /* Determine the MSCAN device index from the physical address */
156 pval = of_get_property(ofdev->node, "reg", &plen);
157 BUG_ON(!pval || plen < sizeof(*pval));
158 clockidx = (*pval & 0x80) ? 1 : 0;
159 if (*pval & 0x2000)
160 clockidx += 2;
161
162 /*
163 * Clock source and divider selection: 3 different clock sources
164 * can be selected: "ip", "ref" or "sys". For the latter two, a
165 * clock divider can be defined as well. If the clock source is
166 * not specified by the device tree, we first try to find an
167 * optimal CAN source clock based on the system clock. If that
 168 * is not possible, the reference clock will be used.
169 */
170 if (clock_name && !strcmp(clock_name, "ip")) {
171 *mscan_clksrc = MSCAN_CLKSRC_IPS;
172 freq = mpc5xxx_get_bus_frequency(ofdev->node);
173 } else {
174 *mscan_clksrc = MSCAN_CLKSRC_BUS;
175
176 pval = of_get_property(ofdev->node,
177 "fsl,mscan-clock-divider", &plen);
178 if (pval && plen == sizeof(*pval))
179 clockdiv = *pval;
180 if (!clockdiv)
181 clockdiv = 1;
182
183 if (!clock_name || !strcmp(clock_name, "sys")) {
184 sys_clk = clk_get(&ofdev->dev, "sys_clk");
185 if (!sys_clk) {
186 dev_err(&ofdev->dev, "couldn't get sys_clk\n");
187 goto exit_unmap;
188 }
189 /* Get and round up/down sys clock rate */
190 sys_freq = 1000000 *
191 ((clk_get_rate(sys_clk) + 499999) / 1000000);
192
193 if (!clock_name) {
194 /* A multiple of 16 MHz would be optimal */
195 if ((sys_freq % 16000000) == 0) {
196 clocksrc = 0;
197 clockdiv = sys_freq / 16000000;
198 freq = sys_freq / clockdiv;
199 }
200 } else {
201 clocksrc = 0;
202 freq = sys_freq / clockdiv;
203 }
204 }
205
206 if (clocksrc < 0) {
207 ref_clk = clk_get(&ofdev->dev, "ref_clk");
208 if (!ref_clk) {
209 dev_err(&ofdev->dev, "couldn't get ref_clk\n");
210 goto exit_unmap;
211 }
212 clocksrc = 1;
213 freq = clk_get_rate(ref_clk) / clockdiv;
214 }
215 }
216
217 /* Disable clock */
218 out_be32(&clockctl->mccr[clockidx], 0x0);
219 if (clocksrc >= 0) {
220 /* Set source and divider */
221 val = (clocksrc << 14) | ((clockdiv - 1) << 17);
222 out_be32(&clockctl->mccr[clockidx], val);
223 /* Enable clock */
224 out_be32(&clockctl->mccr[clockidx], val | 0x10000);
225 }
226
227 /* Enable MSCAN clock domain */
228 val = in_be32(&clockctl->sccr[1]);
229 if (!(val & (1 << 25)))
230 out_be32(&clockctl->sccr[1], val | (1 << 25));
231
232 dev_dbg(&ofdev->dev, "using '%s' with frequency divider %d\n",
233 *mscan_clksrc == MSCAN_CLKSRC_IPS ? "ips_clk" :
234 clocksrc == 1 ? "ref_clk" : "sys_clk", clockdiv);
235
236exit_unmap:
237 of_node_put(np_clock);
238 iounmap(clockctl);
239
240 return freq;
241}
242#else /* !CONFIG_PPC_MPC512x */
243static u32 __devinit mpc512x_can_get_clock(struct of_device *ofdev,
244 const char *clock_name,
245 int *mscan_clksrc)
246{
247 return 0;
248}
249#endif /* CONFIG_PPC_MPC512x */
91 250
92static int __devinit mpc5xxx_can_probe(struct of_device *ofdev, 251static int __devinit mpc5xxx_can_probe(struct of_device *ofdev,
93 const struct of_device_id *id) 252 const struct of_device_id *id)
94{ 253{
254 struct mpc5xxx_can_data *data = (struct mpc5xxx_can_data *)id->data;
95 struct device_node *np = ofdev->node; 255 struct device_node *np = ofdev->node;
96 struct net_device *dev; 256 struct net_device *dev;
97 struct mscan_priv *priv; 257 struct mscan_priv *priv;
98 void __iomem *base; 258 void __iomem *base;
99 const char *clk_src; 259 const char *clock_name = NULL;
100 int err, irq, clock_src; 260 int irq, mscan_clksrc = 0;
261 int err = -ENOMEM;
101 262
102 base = of_iomap(ofdev->node, 0); 263 base = of_iomap(np, 0);
103 if (!base) { 264 if (!base) {
104 dev_err(&ofdev->dev, "couldn't ioremap\n"); 265 dev_err(&ofdev->dev, "couldn't ioremap\n");
105 err = -ENOMEM; 266 return err;
106 goto exit_release_mem;
107 } 267 }
108 268
109 irq = irq_of_parse_and_map(np, 0); 269 irq = irq_of_parse_and_map(np, 0);
@@ -114,37 +274,27 @@ static int __devinit mpc5xxx_can_probe(struct of_device *ofdev,
114 } 274 }
115 275
116 dev = alloc_mscandev(); 276 dev = alloc_mscandev();
117 if (!dev) { 277 if (!dev)
118 err = -ENOMEM;
119 goto exit_dispose_irq; 278 goto exit_dispose_irq;
120 }
121 279
122 priv = netdev_priv(dev); 280 priv = netdev_priv(dev);
123 priv->reg_base = base; 281 priv->reg_base = base;
124 dev->irq = irq; 282 dev->irq = irq;
125 283
126 /* 284 clock_name = of_get_property(np, "fsl,mscan-clock-source", NULL);
127 * Either the oscillator clock (SYS_XTAL_IN) or the IP bus clock 285
128 * (IP_CLK) can be selected as MSCAN clock source. According to 286 BUG_ON(!data);
129 * the MPC5200 user's manual, the oscillator clock is the better 287 priv->type = data->type;
130 * choice as it has less jitter. For this reason, it is selected 288 priv->can.clock.freq = data->get_clock(ofdev, clock_name,
131 * by default. 289 &mscan_clksrc);
132 */
133 clk_src = of_get_property(np, "fsl,mscan-clock-source", NULL);
134 if (clk_src && strcmp(clk_src, "ip") == 0)
135 clock_src = MSCAN_CLKSRC_BUS;
136 else
137 clock_src = MSCAN_CLKSRC_XTAL;
138 priv->can.clock.freq = mpc52xx_can_clock_freq(ofdev, clock_src);
139 if (!priv->can.clock.freq) { 290 if (!priv->can.clock.freq) {
140 dev_err(&ofdev->dev, "couldn't get MSCAN clock frequency\n"); 291 dev_err(&ofdev->dev, "couldn't get MSCAN clock properties\n");
141 err = -ENODEV;
142 goto exit_free_mscan; 292 goto exit_free_mscan;
143 } 293 }
144 294
145 SET_NETDEV_DEV(dev, &ofdev->dev); 295 SET_NETDEV_DEV(dev, &ofdev->dev);
146 296
147 err = register_mscandev(dev, clock_src); 297 err = register_mscandev(dev, mscan_clksrc);
148 if (err) { 298 if (err) {
149 dev_err(&ofdev->dev, "registering %s failed (err=%d)\n", 299 dev_err(&ofdev->dev, "registering %s failed (err=%d)\n",
150 DRV_NAME, err); 300 DRV_NAME, err);
@@ -164,7 +314,7 @@ exit_dispose_irq:
164 irq_dispose_mapping(irq); 314 irq_dispose_mapping(irq);
165exit_unmap_mem: 315exit_unmap_mem:
166 iounmap(base); 316 iounmap(base);
167exit_release_mem: 317
168 return err; 318 return err;
169} 319}
170 320
@@ -225,8 +375,20 @@ static int mpc5xxx_can_resume(struct of_device *ofdev)
225} 375}
226#endif 376#endif
227 377
378static struct mpc5xxx_can_data __devinitdata mpc5200_can_data = {
379 .type = MSCAN_TYPE_MPC5200,
380 .get_clock = mpc52xx_can_get_clock,
381};
382
383static struct mpc5xxx_can_data __devinitdata mpc5121_can_data = {
384 .type = MSCAN_TYPE_MPC5121,
385 .get_clock = mpc512x_can_get_clock,
386};
387
228static struct of_device_id __devinitdata mpc5xxx_can_table[] = { 388static struct of_device_id __devinitdata mpc5xxx_can_table[] = {
229 {.compatible = "fsl,mpc5200-mscan"}, 389 { .compatible = "fsl,mpc5200-mscan", .data = &mpc5200_can_data, },
390 /* Note that only MPC5121 Rev. 2 (and later) is supported */
391 { .compatible = "fsl,mpc5121-mscan", .data = &mpc5121_can_data, },
230 {}, 392 {},
231}; 393};
232 394
@@ -255,5 +417,5 @@ static void __exit mpc5xxx_can_exit(void)
255module_exit(mpc5xxx_can_exit); 417module_exit(mpc5xxx_can_exit);
256 418
257MODULE_AUTHOR("Wolfgang Grandegger <wg@grandegger.com>"); 419MODULE_AUTHOR("Wolfgang Grandegger <wg@grandegger.com>");
258MODULE_DESCRIPTION("Freescale MPC5200 CAN driver"); 420MODULE_DESCRIPTION("Freescale MPC5xxx CAN driver");
259MODULE_LICENSE("GPL v2"); 421MODULE_LICENSE("GPL v2");
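On the MPC5121 the driver prefers a divided system clock when it yields a multiple of 16 MHz (e.g. a 64 MHz sys clock gives clockdiv = 4 and a 16 MHz MSCAN clock) and otherwise falls back to the reference clock. The control word written to MCCR follows the layout visible in the code above: clock-source select at bit 14, clock enable at bit 16 and (divider - 1) starting at bit 17. A small sketch of that composition (field positions taken from the probe code, shown only for illustration):

/* Sketch: build the MSCAN clock control word as the probe code does. */
static u32 mccr_word_sketch(int clocksrc, u32 clockdiv, int enable)
{
	u32 val = (clocksrc << 14) | ((clockdiv - 1) << 17);

	if (enable)
		val |= 0x10000;		/* bit 16: clock enable */
	return val;
}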
diff --git a/drivers/net/can/mscan/mscan.c b/drivers/net/can/mscan/mscan.c
index 07346f880ca..6b7dd578d41 100644
--- a/drivers/net/can/mscan/mscan.c
+++ b/drivers/net/can/mscan/mscan.c
@@ -4,7 +4,7 @@
4 * Copyright (C) 2005-2006 Andrey Volkov <avolkov@varma-el.com>, 4 * Copyright (C) 2005-2006 Andrey Volkov <avolkov@varma-el.com>,
5 * Varma Electronics Oy 5 * Varma Electronics Oy
6 * Copyright (C) 2008-2009 Wolfgang Grandegger <wg@grandegger.com> 6 * Copyright (C) 2008-2009 Wolfgang Grandegger <wg@grandegger.com>
7 * Copytight (C) 2008-2009 Pengutronix <kernel@pengutronix.de> 7 * Copyright (C) 2008-2009 Pengutronix <kernel@pengutronix.de>
8 * 8 *
9 * This program is free software; you can redistribute it and/or modify 9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the version 2 of the GNU General Public License 10 * it under the terms of the version 2 of the GNU General Public License
@@ -152,6 +152,12 @@ static int mscan_start(struct net_device *dev)
152 priv->shadow_canrier = 0; 152 priv->shadow_canrier = 0;
153 priv->flags = 0; 153 priv->flags = 0;
154 154
155 if (priv->type == MSCAN_TYPE_MPC5121) {
156 /* Clear pending bus-off condition */
157 if (in_8(&regs->canmisc) & MSCAN_BOHOLD)
158 out_8(&regs->canmisc, MSCAN_BOHOLD);
159 }
160
155 err = mscan_set_mode(dev, MSCAN_NORMAL_MODE); 161 err = mscan_set_mode(dev, MSCAN_NORMAL_MODE);
156 if (err) 162 if (err)
157 return err; 163 return err;
@@ -163,8 +169,29 @@ static int mscan_start(struct net_device *dev)
163 out_8(&regs->cantier, 0); 169 out_8(&regs->cantier, 0);
164 170
165 /* Enable receive interrupts. */ 171 /* Enable receive interrupts. */
166 out_8(&regs->canrier, MSCAN_OVRIE | MSCAN_RXFIE | MSCAN_CSCIE | 172 out_8(&regs->canrier, MSCAN_RX_INTS_ENABLE);
167 MSCAN_RSTATE1 | MSCAN_RSTATE0 | MSCAN_TSTATE1 | MSCAN_TSTATE0); 173
174 return 0;
175}
176
177static int mscan_restart(struct net_device *dev)
178{
179 struct mscan_priv *priv = netdev_priv(dev);
180
181 if (priv->type == MSCAN_TYPE_MPC5121) {
182 struct mscan_regs *regs = (struct mscan_regs *)priv->reg_base;
183
184 priv->can.state = CAN_STATE_ERROR_ACTIVE;
185 WARN(!(in_8(&regs->canmisc) & MSCAN_BOHOLD),
186 "bus-off state expected");
187 out_8(&regs->canmisc, MSCAN_BOHOLD);
188 /* Re-enable receive interrupts. */
189 out_8(&regs->canrier, MSCAN_RX_INTS_ENABLE);
190 } else {
191 if (priv->can.state <= CAN_STATE_BUS_OFF)
192 mscan_set_mode(dev, MSCAN_INIT_MODE);
193 return mscan_start(dev);
194 }
168 195
169 return 0; 196 return 0;
170} 197}
@@ -177,8 +204,8 @@ static netdev_tx_t mscan_start_xmit(struct sk_buff *skb, struct net_device *dev)
177 int i, rtr, buf_id; 204 int i, rtr, buf_id;
178 u32 can_id; 205 u32 can_id;
179 206
180 if (frame->can_dlc > 8) 207 if (can_dropped_invalid_skb(dev, skb))
181 return -EINVAL; 208 return NETDEV_TX_OK;
182 209
183 out_8(&regs->cantier, 0); 210 out_8(&regs->cantier, 0);
184 211
@@ -359,9 +386,12 @@ static void mscan_get_err_frame(struct net_device *dev, struct can_frame *frame,
359 * automatically. To avoid that we stop the chip doing 386 * automatically. To avoid that we stop the chip doing
360 * a light-weight stop (we are in irq-context). 387 * a light-weight stop (we are in irq-context).
361 */ 388 */
362 out_8(&regs->cantier, 0); 389 if (priv->type != MSCAN_TYPE_MPC5121) {
363 out_8(&regs->canrier, 0); 390 out_8(&regs->cantier, 0);
364 setbits8(&regs->canctl0, MSCAN_SLPRQ | MSCAN_INITRQ); 391 out_8(&regs->canrier, 0);
392 setbits8(&regs->canctl0,
393 MSCAN_SLPRQ | MSCAN_INITRQ);
394 }
365 can_bus_off(dev); 395 can_bus_off(dev);
366 break; 396 break;
367 default: 397 default:
@@ -491,9 +521,7 @@ static int mscan_do_set_mode(struct net_device *dev, enum can_mode mode)
491 521
492 switch (mode) { 522 switch (mode) {
493 case CAN_MODE_START: 523 case CAN_MODE_START:
494 if (priv->can.state <= CAN_STATE_BUS_OFF) 524 ret = mscan_restart(dev);
495 mscan_set_mode(dev, MSCAN_INIT_MODE);
496 ret = mscan_start(dev);
497 if (ret) 525 if (ret)
498 break; 526 break;
499 if (netif_queue_stopped(dev)) 527 if (netif_queue_stopped(dev))
@@ -592,18 +620,21 @@ static const struct net_device_ops mscan_netdev_ops = {
592 .ndo_start_xmit = mscan_start_xmit, 620 .ndo_start_xmit = mscan_start_xmit,
593}; 621};
594 622
595int register_mscandev(struct net_device *dev, int clock_src) 623int register_mscandev(struct net_device *dev, int mscan_clksrc)
596{ 624{
597 struct mscan_priv *priv = netdev_priv(dev); 625 struct mscan_priv *priv = netdev_priv(dev);
598 struct mscan_regs *regs = (struct mscan_regs *)priv->reg_base; 626 struct mscan_regs *regs = (struct mscan_regs *)priv->reg_base;
599 u8 ctl1; 627 u8 ctl1;
600 628
601 ctl1 = in_8(&regs->canctl1); 629 ctl1 = in_8(&regs->canctl1);
602 if (clock_src) 630 if (mscan_clksrc)
603 ctl1 |= MSCAN_CLKSRC; 631 ctl1 |= MSCAN_CLKSRC;
604 else 632 else
605 ctl1 &= ~MSCAN_CLKSRC; 633 ctl1 &= ~MSCAN_CLKSRC;
606 634
635 if (priv->type == MSCAN_TYPE_MPC5121)
636 ctl1 |= MSCAN_BORM; /* bus-off recovery upon request */
637
607 ctl1 |= MSCAN_CANE; 638 ctl1 |= MSCAN_CANE;
608 out_8(&regs->canctl1, ctl1); 639 out_8(&regs->canctl1, ctl1);
609 udelay(100); 640 udelay(100);
@@ -655,6 +686,7 @@ struct net_device *alloc_mscandev(void)
655 priv->can.bittiming_const = &mscan_bittiming_const; 686 priv->can.bittiming_const = &mscan_bittiming_const;
656 priv->can.do_set_bittiming = mscan_do_set_bittiming; 687 priv->can.do_set_bittiming = mscan_do_set_bittiming;
657 priv->can.do_set_mode = mscan_do_set_mode; 688 priv->can.do_set_mode = mscan_do_set_mode;
689 priv->can.ctrlmode_supported = CAN_CTRLMODE_3_SAMPLES;
658 690
659 for (i = 0; i < TX_QUEUE_SIZE; i++) { 691 for (i = 0; i < TX_QUEUE_SIZE; i++) {
660 priv->tx_queue[i].id = i; 692 priv->tx_queue[i].id = i;
diff --git a/drivers/net/can/mscan/mscan.h b/drivers/net/can/mscan/mscan.h
index 00fc4aaf1ed..4ff966473bc 100644
--- a/drivers/net/can/mscan/mscan.h
+++ b/drivers/net/can/mscan/mscan.h
@@ -38,18 +38,20 @@
38#define MSCAN_CLKSRC 0x40 38#define MSCAN_CLKSRC 0x40
39#define MSCAN_LOOPB 0x20 39#define MSCAN_LOOPB 0x20
40#define MSCAN_LISTEN 0x10 40#define MSCAN_LISTEN 0x10
41#define MSCAN_BORM 0x08
41#define MSCAN_WUPM 0x04 42#define MSCAN_WUPM 0x04
42#define MSCAN_SLPAK 0x02 43#define MSCAN_SLPAK 0x02
43#define MSCAN_INITAK 0x01 44#define MSCAN_INITAK 0x01
44 45
45/* Use the MPC5200 MSCAN variant? */ 46/* Use the MPC5XXX MSCAN variant? */
46#ifdef CONFIG_PPC 47#ifdef CONFIG_PPC
47#define MSCAN_FOR_MPC5200 48#define MSCAN_FOR_MPC5XXX
48#endif 49#endif
49 50
50#ifdef MSCAN_FOR_MPC5200 51#ifdef MSCAN_FOR_MPC5XXX
51#define MSCAN_CLKSRC_BUS 0 52#define MSCAN_CLKSRC_BUS 0
52#define MSCAN_CLKSRC_XTAL MSCAN_CLKSRC 53#define MSCAN_CLKSRC_XTAL MSCAN_CLKSRC
54#define MSCAN_CLKSRC_IPS MSCAN_CLKSRC
53#else 55#else
54#define MSCAN_CLKSRC_BUS MSCAN_CLKSRC 56#define MSCAN_CLKSRC_BUS MSCAN_CLKSRC
55#define MSCAN_CLKSRC_XTAL 0 57#define MSCAN_CLKSRC_XTAL 0
@@ -136,7 +138,7 @@
136#define MSCAN_EFF_RTR_SHIFT 0 138#define MSCAN_EFF_RTR_SHIFT 0
137#define MSCAN_EFF_FLAGS 0x18 /* IDE + SRR */ 139#define MSCAN_EFF_FLAGS 0x18 /* IDE + SRR */
138 140
139#ifdef MSCAN_FOR_MPC5200 141#ifdef MSCAN_FOR_MPC5XXX
140#define _MSCAN_RESERVED_(n, num) u8 _res##n[num] 142#define _MSCAN_RESERVED_(n, num) u8 _res##n[num]
141#define _MSCAN_RESERVED_DSR_SIZE 2 143#define _MSCAN_RESERVED_DSR_SIZE 2
142#else 144#else
@@ -165,67 +167,66 @@ struct mscan_regs {
165 u8 cantbsel; /* + 0x14 0x0a */ 167 u8 cantbsel; /* + 0x14 0x0a */
166 u8 canidac; /* + 0x15 0x0b */ 168 u8 canidac; /* + 0x15 0x0b */
167 u8 reserved; /* + 0x16 0x0c */ 169 u8 reserved; /* + 0x16 0x0c */
168 _MSCAN_RESERVED_(6, 5); /* + 0x17 */ 170 _MSCAN_RESERVED_(6, 2); /* + 0x17 */
169#ifndef MSCAN_FOR_MPC5200 171 u8 canmisc; /* + 0x19 0x0d */
170 u8 canmisc; /* 0x0d */ 172 _MSCAN_RESERVED_(7, 2); /* + 0x1a */
171#endif
172 u8 canrxerr; /* + 0x1c 0x0e */ 173 u8 canrxerr; /* + 0x1c 0x0e */
173 u8 cantxerr; /* + 0x1d 0x0f */ 174 u8 cantxerr; /* + 0x1d 0x0f */
174 _MSCAN_RESERVED_(7, 2); /* + 0x1e */ 175 _MSCAN_RESERVED_(8, 2); /* + 0x1e */
175 u16 canidar1_0; /* + 0x20 0x10 */ 176 u16 canidar1_0; /* + 0x20 0x10 */
176 _MSCAN_RESERVED_(8, 2); /* + 0x22 */ 177 _MSCAN_RESERVED_(9, 2); /* + 0x22 */
177 u16 canidar3_2; /* + 0x24 0x12 */ 178 u16 canidar3_2; /* + 0x24 0x12 */
178 _MSCAN_RESERVED_(9, 2); /* + 0x26 */ 179 _MSCAN_RESERVED_(10, 2); /* + 0x26 */
179 u16 canidmr1_0; /* + 0x28 0x14 */ 180 u16 canidmr1_0; /* + 0x28 0x14 */
180 _MSCAN_RESERVED_(10, 2); /* + 0x2a */ 181 _MSCAN_RESERVED_(11, 2); /* + 0x2a */
181 u16 canidmr3_2; /* + 0x2c 0x16 */ 182 u16 canidmr3_2; /* + 0x2c 0x16 */
182 _MSCAN_RESERVED_(11, 2); /* + 0x2e */ 183 _MSCAN_RESERVED_(12, 2); /* + 0x2e */
183 u16 canidar5_4; /* + 0x30 0x18 */ 184 u16 canidar5_4; /* + 0x30 0x18 */
184 _MSCAN_RESERVED_(12, 2); /* + 0x32 */ 185 _MSCAN_RESERVED_(13, 2); /* + 0x32 */
185 u16 canidar7_6; /* + 0x34 0x1a */ 186 u16 canidar7_6; /* + 0x34 0x1a */
186 _MSCAN_RESERVED_(13, 2); /* + 0x36 */ 187 _MSCAN_RESERVED_(14, 2); /* + 0x36 */
187 u16 canidmr5_4; /* + 0x38 0x1c */ 188 u16 canidmr5_4; /* + 0x38 0x1c */
188 _MSCAN_RESERVED_(14, 2); /* + 0x3a */ 189 _MSCAN_RESERVED_(15, 2); /* + 0x3a */
189 u16 canidmr7_6; /* + 0x3c 0x1e */ 190 u16 canidmr7_6; /* + 0x3c 0x1e */
190 _MSCAN_RESERVED_(15, 2); /* + 0x3e */ 191 _MSCAN_RESERVED_(16, 2); /* + 0x3e */
191 struct { 192 struct {
192 u16 idr1_0; /* + 0x40 0x20 */ 193 u16 idr1_0; /* + 0x40 0x20 */
193 _MSCAN_RESERVED_(16, 2); /* + 0x42 */ 194 _MSCAN_RESERVED_(17, 2); /* + 0x42 */
194 u16 idr3_2; /* + 0x44 0x22 */ 195 u16 idr3_2; /* + 0x44 0x22 */
195 _MSCAN_RESERVED_(17, 2); /* + 0x46 */ 196 _MSCAN_RESERVED_(18, 2); /* + 0x46 */
196 u16 dsr1_0; /* + 0x48 0x24 */ 197 u16 dsr1_0; /* + 0x48 0x24 */
197 _MSCAN_RESERVED_(18, 2); /* + 0x4a */ 198 _MSCAN_RESERVED_(19, 2); /* + 0x4a */
198 u16 dsr3_2; /* + 0x4c 0x26 */ 199 u16 dsr3_2; /* + 0x4c 0x26 */
199 _MSCAN_RESERVED_(19, 2); /* + 0x4e */ 200 _MSCAN_RESERVED_(20, 2); /* + 0x4e */
200 u16 dsr5_4; /* + 0x50 0x28 */ 201 u16 dsr5_4; /* + 0x50 0x28 */
201 _MSCAN_RESERVED_(20, 2); /* + 0x52 */ 202 _MSCAN_RESERVED_(21, 2); /* + 0x52 */
202 u16 dsr7_6; /* + 0x54 0x2a */ 203 u16 dsr7_6; /* + 0x54 0x2a */
203 _MSCAN_RESERVED_(21, 2); /* + 0x56 */ 204 _MSCAN_RESERVED_(22, 2); /* + 0x56 */
204 u8 dlr; /* + 0x58 0x2c */ 205 u8 dlr; /* + 0x58 0x2c */
205 u8:8; /* + 0x59 0x2d */ 206 u8 reserved; /* + 0x59 0x2d */
206 _MSCAN_RESERVED_(22, 2); /* + 0x5a */ 207 _MSCAN_RESERVED_(23, 2); /* + 0x5a */
207 u16 time; /* + 0x5c 0x2e */ 208 u16 time; /* + 0x5c 0x2e */
208 } rx; 209 } rx;
209 _MSCAN_RESERVED_(23, 2); /* + 0x5e */ 210 _MSCAN_RESERVED_(24, 2); /* + 0x5e */
210 struct { 211 struct {
211 u16 idr1_0; /* + 0x60 0x30 */ 212 u16 idr1_0; /* + 0x60 0x30 */
212 _MSCAN_RESERVED_(24, 2); /* + 0x62 */ 213 _MSCAN_RESERVED_(25, 2); /* + 0x62 */
213 u16 idr3_2; /* + 0x64 0x32 */ 214 u16 idr3_2; /* + 0x64 0x32 */
214 _MSCAN_RESERVED_(25, 2); /* + 0x66 */ 215 _MSCAN_RESERVED_(26, 2); /* + 0x66 */
215 u16 dsr1_0; /* + 0x68 0x34 */ 216 u16 dsr1_0; /* + 0x68 0x34 */
216 _MSCAN_RESERVED_(26, 2); /* + 0x6a */ 217 _MSCAN_RESERVED_(27, 2); /* + 0x6a */
217 u16 dsr3_2; /* + 0x6c 0x36 */ 218 u16 dsr3_2; /* + 0x6c 0x36 */
218 _MSCAN_RESERVED_(27, 2); /* + 0x6e */ 219 _MSCAN_RESERVED_(28, 2); /* + 0x6e */
219 u16 dsr5_4; /* + 0x70 0x38 */ 220 u16 dsr5_4; /* + 0x70 0x38 */
220 _MSCAN_RESERVED_(28, 2); /* + 0x72 */ 221 _MSCAN_RESERVED_(29, 2); /* + 0x72 */
221 u16 dsr7_6; /* + 0x74 0x3a */ 222 u16 dsr7_6; /* + 0x74 0x3a */
222 _MSCAN_RESERVED_(29, 2); /* + 0x76 */ 223 _MSCAN_RESERVED_(30, 2); /* + 0x76 */
223 u8 dlr; /* + 0x78 0x3c */ 224 u8 dlr; /* + 0x78 0x3c */
224 u8 tbpr; /* + 0x79 0x3d */ 225 u8 tbpr; /* + 0x79 0x3d */
225 _MSCAN_RESERVED_(30, 2); /* + 0x7a */ 226 _MSCAN_RESERVED_(31, 2); /* + 0x7a */
226 u16 time; /* + 0x7c 0x3e */ 227 u16 time; /* + 0x7c 0x3e */
227 } tx; 228 } tx;
228 _MSCAN_RESERVED_(31, 2); /* + 0x7e */ 229 _MSCAN_RESERVED_(32, 2); /* + 0x7e */
229} __attribute__ ((packed)); 230} __attribute__ ((packed));
230 231
231#undef _MSCAN_RESERVED_ 232#undef _MSCAN_RESERVED_
@@ -237,6 +238,15 @@ struct mscan_regs {
237#define MSCAN_POWEROFF_MODE (MSCAN_CSWAI | MSCAN_SLPRQ) 238#define MSCAN_POWEROFF_MODE (MSCAN_CSWAI | MSCAN_SLPRQ)
238#define MSCAN_SET_MODE_RETRIES 255 239#define MSCAN_SET_MODE_RETRIES 255
239#define MSCAN_ECHO_SKB_MAX 3 240#define MSCAN_ECHO_SKB_MAX 3
241#define MSCAN_RX_INTS_ENABLE (MSCAN_OVRIE | MSCAN_RXFIE | MSCAN_CSCIE | \
242 MSCAN_RSTATE1 | MSCAN_RSTATE0 | \
243 MSCAN_TSTATE1 | MSCAN_TSTATE0)
244
245/* MSCAN type variants */
246enum {
247 MSCAN_TYPE_MPC5200,
248 MSCAN_TYPE_MPC5121
249};
240 250
241#define BTR0_BRP_MASK 0x3f 251#define BTR0_BRP_MASK 0x3f
242#define BTR0_SJW_SHIFT 6 252#define BTR0_SJW_SHIFT 6
@@ -270,6 +280,7 @@ struct tx_queue_entry {
270 280
271struct mscan_priv { 281struct mscan_priv {
272 struct can_priv can; /* must be the first member */ 282 struct can_priv can; /* must be the first member */
283 unsigned int type; /* MSCAN type variants */
273 long open_time; 284 long open_time;
274 unsigned long flags; 285 unsigned long flags;
275 void __iomem *reg_base; /* ioremap'ed address to registers */ 286 void __iomem *reg_base; /* ioremap'ed address to registers */
@@ -285,12 +296,7 @@ struct mscan_priv {
285}; 296};
286 297
287extern struct net_device *alloc_mscandev(void); 298extern struct net_device *alloc_mscandev(void);
288/* 299extern int register_mscandev(struct net_device *dev, int mscan_clksrc);
289 * clock_src:
290 * 1 = The MSCAN clock source is the onchip Bus Clock.
291 * 0 = The MSCAN clock source is the chip Oscillator Clock.
292 */
293extern int register_mscandev(struct net_device *dev, int clock_src);
294extern void unregister_mscandev(struct net_device *dev); 300extern void unregister_mscandev(struct net_device *dev);
295 301
296#endif /* __MSCAN_H__ */ 302#endif /* __MSCAN_H__ */
diff --git a/drivers/net/can/sja1000/Kconfig b/drivers/net/can/sja1000/Kconfig
index 4c674927f24..9e277d64a31 100644
--- a/drivers/net/can/sja1000/Kconfig
+++ b/drivers/net/can/sja1000/Kconfig
@@ -44,4 +44,16 @@ config CAN_KVASER_PCI
44 This driver is for the the PCIcanx and PCIcan cards (1, 2 or 44 This driver is for the the PCIcanx and PCIcan cards (1, 2 or
45 4 channel) from Kvaser (http://www.kvaser.com). 45 4 channel) from Kvaser (http://www.kvaser.com).
46 46
47config CAN_PLX_PCI
48 tristate "PLX90xx PCI-bridge based Cards"
49 depends on PCI
50 ---help---
51 This driver is for CAN interface cards based on
52 the PLX90xx PCI bridge.
 53 The driver currently supports:
54 - Adlink PCI-7841/cPCI-7841 card (http://www.adlinktech.com/)
55 - Adlink PCI-7841/cPCI-7841 SE card
56 - Marathon CAN-bus-PCI card (http://www.marathon.ru/)
57 - TEWS TECHNOLOGIES TPMC810 card (http://www.tews.com/)
58
47endif 59endif
diff --git a/drivers/net/can/sja1000/Makefile b/drivers/net/can/sja1000/Makefile
index 9d245ac0396..ce924553995 100644
--- a/drivers/net/can/sja1000/Makefile
+++ b/drivers/net/can/sja1000/Makefile
@@ -8,5 +8,6 @@ obj-$(CONFIG_CAN_SJA1000_PLATFORM) += sja1000_platform.o
8obj-$(CONFIG_CAN_SJA1000_OF_PLATFORM) += sja1000_of_platform.o 8obj-$(CONFIG_CAN_SJA1000_OF_PLATFORM) += sja1000_of_platform.o
9obj-$(CONFIG_CAN_EMS_PCI) += ems_pci.o 9obj-$(CONFIG_CAN_EMS_PCI) += ems_pci.o
10obj-$(CONFIG_CAN_KVASER_PCI) += kvaser_pci.o 10obj-$(CONFIG_CAN_KVASER_PCI) += kvaser_pci.o
11obj-$(CONFIG_CAN_PLX_PCI) += plx_pci.o
11 12
12ccflags-$(CONFIG_CAN_DEBUG_DEVICES) := -DDEBUG 13ccflags-$(CONFIG_CAN_DEBUG_DEVICES) := -DDEBUG
diff --git a/drivers/net/can/sja1000/ems_pci.c b/drivers/net/can/sja1000/ems_pci.c
index fd04789d337..87300606abb 100644
--- a/drivers/net/can/sja1000/ems_pci.c
+++ b/drivers/net/can/sja1000/ems_pci.c
@@ -102,7 +102,7 @@ struct ems_pci_card {
102 102
103#define EMS_PCI_BASE_SIZE 4096 /* size of controller area */ 103#define EMS_PCI_BASE_SIZE 4096 /* size of controller area */
104 104
105static struct pci_device_id ems_pci_tbl[] = { 105static DEFINE_PCI_DEVICE_TABLE(ems_pci_tbl) = {
106 /* CPC-PCI v1 */ 106 /* CPC-PCI v1 */
107 {PCI_VENDOR_ID_SIEMENS, 0x2104, PCI_ANY_ID, PCI_ANY_ID,}, 107 {PCI_VENDOR_ID_SIEMENS, 0x2104, PCI_ANY_ID, PCI_ANY_ID,},
108 /* CPC-PCI v2 */ 108 /* CPC-PCI v2 */
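Several ID tables in this series (ems_pci and kvaser_pci here, and cassini, chelsio, cxgb3, defxx, dl2k, e100 and e1000 further below) are converted from an open-coded struct pci_device_id array to DEFINE_PCI_DEVICE_TABLE(). A minimal sketch of what the conversion amounts to, assuming the pci.h definition current for this series (the macro adds const and __devinitconst placement); the table name and IDs below are hypothetical, not part of the patch:

#include <linux/module.h>
#include <linux/pci.h>

/*
 * Roughly what DEFINE_PCI_DEVICE_TABLE(tbl) expands to in this era:
 *   const struct pci_device_id tbl[] __devinitconst
 */
static DEFINE_PCI_DEVICE_TABLE(example_pci_tbl) = {
	{ PCI_DEVICE(0x1234, 0x5678) },	/* hypothetical vendor/device IDs */
	{ 0, }
};
MODULE_DEVICE_TABLE(pci, example_pci_tbl);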
diff --git a/drivers/net/can/sja1000/kvaser_pci.c b/drivers/net/can/sja1000/kvaser_pci.c
index 7dd7769b971..441e776a7f5 100644
--- a/drivers/net/can/sja1000/kvaser_pci.c
+++ b/drivers/net/can/sja1000/kvaser_pci.c
@@ -109,7 +109,7 @@ struct kvaser_pci {
109#define KVASER_PCI_VENDOR_ID2 0x1a07 /* the PCI device and vendor IDs */ 109#define KVASER_PCI_VENDOR_ID2 0x1a07 /* the PCI device and vendor IDs */
110#define KVASER_PCI_DEVICE_ID2 0x0008 110#define KVASER_PCI_DEVICE_ID2 0x0008
111 111
112static struct pci_device_id kvaser_pci_tbl[] = { 112static DEFINE_PCI_DEVICE_TABLE(kvaser_pci_tbl) = {
113 {KVASER_PCI_VENDOR_ID1, KVASER_PCI_DEVICE_ID1, PCI_ANY_ID, PCI_ANY_ID,}, 113 {KVASER_PCI_VENDOR_ID1, KVASER_PCI_DEVICE_ID1, PCI_ANY_ID, PCI_ANY_ID,},
114 {KVASER_PCI_VENDOR_ID2, KVASER_PCI_DEVICE_ID2, PCI_ANY_ID, PCI_ANY_ID,}, 114 {KVASER_PCI_VENDOR_ID2, KVASER_PCI_DEVICE_ID2, PCI_ANY_ID, PCI_ANY_ID,},
115 { 0,} 115 { 0,}
diff --git a/drivers/net/can/sja1000/plx_pci.c b/drivers/net/can/sja1000/plx_pci.c
new file mode 100644
index 00000000000..6b46a6395f8
--- /dev/null
+++ b/drivers/net/can/sja1000/plx_pci.c
@@ -0,0 +1,472 @@
1/*
2 * Copyright (C) 2008-2010 Pavel Cheblakov <P.B.Cheblakov@inp.nsk.su>
3 *
4 * Derived from the ems_pci.c driver:
5 * Copyright (C) 2007 Wolfgang Grandegger <wg@grandegger.com>
6 * Copyright (C) 2008 Markus Plessing <plessing@ems-wuensche.com>
7 * Copyright (C) 2008 Sebastian Haas <haas@ems-wuensche.com>
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the version 2 of the GNU General Public License
11 * as published by the Free Software Foundation
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software Foundation,
20 * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
21 */
22
23#include <linux/kernel.h>
24#include <linux/module.h>
25#include <linux/interrupt.h>
26#include <linux/netdevice.h>
27#include <linux/delay.h>
28#include <linux/pci.h>
29#include <linux/can.h>
30#include <linux/can/dev.h>
31#include <linux/io.h>
32
33#include "sja1000.h"
34
35#define DRV_NAME "sja1000_plx_pci"
36
37MODULE_AUTHOR("Pavel Cheblakov <P.B.Cheblakov@inp.nsk.su>");
38MODULE_DESCRIPTION("Socket-CAN driver for PLX90xx PCI-bridge cards with "
39 "the SJA1000 chips");
40MODULE_SUPPORTED_DEVICE("Adlink PCI-7841/cPCI-7841, "
41 "Adlink PCI-7841/cPCI-7841 SE, "
42 "Marathon CAN-bus-PCI, "
43 "TEWS TECHNOLOGIES TPMC810");
44MODULE_LICENSE("GPL v2");
45
46#define PLX_PCI_MAX_CHAN 2
47
48struct plx_pci_card {
49 int channels; /* detected channels count */
50 struct net_device *net_dev[PLX_PCI_MAX_CHAN];
51 void __iomem *conf_addr;
52};
53
54#define PLX_PCI_CAN_CLOCK (16000000 / 2)
55
56/* PLX90xx registers */
57#define PLX_INTCSR 0x4c /* Interrupt Control/Status */
58#define PLX_CNTRL 0x50 /* User I/O, Direct Slave Response,
59 * Serial EEPROM, and Initialization
60 * Control register
61 */
62
63#define PLX_LINT1_EN 0x1 /* Local interrupt 1 enable */
64#define PLX_LINT2_EN (1 << 3) /* Local interrupt 2 enable */
65#define PLX_PCI_INT_EN (1 << 6) /* PCI Interrupt Enable */
66#define PLX_PCI_RESET (1 << 30) /* PCI Adapter Software Reset */
67
68/*
 69 * The board configuration is probably as follows:
70 * RX1 is connected to ground.
71 * TX1 is not connected.
72 * CLKO is not connected.
73 * Setting the OCR register to 0xDA is a good idea.
74 * This means normal output mode, push-pull and the correct polarity.
75 */
76#define PLX_PCI_OCR (OCR_TX0_PUSHPULL | OCR_TX1_PUSHPULL)
77
78/*
79 * In the CDR register, you should set CBP to 1.
80 * You will probably also want to set the clock divider value to 7
81 * (meaning direct oscillator output) because the second SJA1000 chip
 82 * is driven by the first chip's CLKOUT output.
83 */
84#define PLX_PCI_CDR (CDR_CBP | CDR_CLKOUT_MASK)
85
86/* SJA1000 Control Register in the BasicCAN Mode */
87#define REG_CR 0x00
88
 89/* States of some SJA1000 registers after hardware reset in the BasicCAN mode */
90#define REG_CR_BASICCAN_INITIAL 0x21
91#define REG_CR_BASICCAN_INITIAL_MASK 0xa1
92#define REG_SR_BASICCAN_INITIAL 0x0c
93#define REG_IR_BASICCAN_INITIAL 0xe0
94
 95/* States of some SJA1000 registers after hardware reset in the PeliCAN mode */
96#define REG_MOD_PELICAN_INITIAL 0x01
97#define REG_SR_PELICAN_INITIAL 0x3c
98#define REG_IR_PELICAN_INITIAL 0x00
99
100#define ADLINK_PCI_VENDOR_ID 0x144A
101#define ADLINK_PCI_DEVICE_ID 0x7841
102
103#define MARATHON_PCI_DEVICE_ID 0x2715
104
105#define TEWS_PCI_VENDOR_ID 0x1498
106#define TEWS_PCI_DEVICE_ID_TMPC810 0x032A
107
108static void plx_pci_reset_common(struct pci_dev *pdev);
109static void plx_pci_reset_marathon(struct pci_dev *pdev);
110
111struct plx_pci_channel_map {
112 u32 bar;
113 u32 offset;
 114 u32 size; /* 0x00 - auto, i.e. the length of the entire BAR */
115};
116
117struct plx_pci_card_info {
118 const char *name;
119 int channel_count;
120 u32 can_clock;
121 u8 ocr; /* output control register */
122 u8 cdr; /* clock divider register */
123
124 /* Parameters for mapping local configuration space */
125 struct plx_pci_channel_map conf_map;
126
127 /* Parameters for mapping the SJA1000 chips */
128 struct plx_pci_channel_map chan_map_tbl[PLX_PCI_MAX_CHAN];
129
130 /* Pointer to device-dependent reset function */
131 void (*reset_func)(struct pci_dev *pdev);
132};
133
134static struct plx_pci_card_info plx_pci_card_info_adlink __devinitdata = {
135 "Adlink PCI-7841/cPCI-7841", 2,
136 PLX_PCI_CAN_CLOCK, PLX_PCI_OCR, PLX_PCI_CDR,
137 {1, 0x00, 0x00}, { {2, 0x00, 0x80}, {2, 0x80, 0x80} },
138 &plx_pci_reset_common
139 /* based on PLX9052 */
140};
141
142static struct plx_pci_card_info plx_pci_card_info_adlink_se __devinitdata = {
143 "Adlink PCI-7841/cPCI-7841 SE", 2,
144 PLX_PCI_CAN_CLOCK, PLX_PCI_OCR, PLX_PCI_CDR,
145 {0, 0x00, 0x00}, { {2, 0x00, 0x80}, {2, 0x80, 0x80} },
146 &plx_pci_reset_common
147 /* based on PLX9052 */
148};
149
150static struct plx_pci_card_info plx_pci_card_info_marathon __devinitdata = {
151 "Marathon CAN-bus-PCI", 2,
152 PLX_PCI_CAN_CLOCK, PLX_PCI_OCR, PLX_PCI_CDR,
153 {0, 0x00, 0x00}, { {2, 0x00, 0x00}, {4, 0x00, 0x00} },
154 &plx_pci_reset_marathon
155 /* based on PLX9052 */
156};
157
158static struct plx_pci_card_info plx_pci_card_info_tews __devinitdata = {
159 "TEWS TECHNOLOGIES TPMC810", 2,
160 PLX_PCI_CAN_CLOCK, PLX_PCI_OCR, PLX_PCI_CDR,
161 {0, 0x00, 0x00}, { {2, 0x000, 0x80}, {2, 0x100, 0x80} },
162 &plx_pci_reset_common
163 /* based on PLX9030 */
164};
165
166static DEFINE_PCI_DEVICE_TABLE(plx_pci_tbl) = {
167 {
168 /* Adlink PCI-7841/cPCI-7841 */
169 ADLINK_PCI_VENDOR_ID, ADLINK_PCI_DEVICE_ID,
170 PCI_ANY_ID, PCI_ANY_ID,
171 PCI_CLASS_NETWORK_OTHER << 8, ~0,
172 (kernel_ulong_t)&plx_pci_card_info_adlink
173 },
174 {
175 /* Adlink PCI-7841/cPCI-7841 SE */
176 ADLINK_PCI_VENDOR_ID, ADLINK_PCI_DEVICE_ID,
177 PCI_ANY_ID, PCI_ANY_ID,
178 PCI_CLASS_COMMUNICATION_OTHER << 8, ~0,
179 (kernel_ulong_t)&plx_pci_card_info_adlink_se
180 },
181 {
182 /* Marathon CAN-bus-PCI card */
183 PCI_VENDOR_ID_PLX, MARATHON_PCI_DEVICE_ID,
184 PCI_ANY_ID, PCI_ANY_ID,
185 0, 0,
186 (kernel_ulong_t)&plx_pci_card_info_marathon
187 },
188 {
189 /* TEWS TECHNOLOGIES TPMC810 card */
190 TEWS_PCI_VENDOR_ID, TEWS_PCI_DEVICE_ID_TMPC810,
191 PCI_ANY_ID, PCI_ANY_ID,
192 0, 0,
193 (kernel_ulong_t)&plx_pci_card_info_tews
194 },
195 { 0,}
196};
197MODULE_DEVICE_TABLE(pci, plx_pci_tbl);
198
199static u8 plx_pci_read_reg(const struct sja1000_priv *priv, int port)
200{
201 return ioread8(priv->reg_base + port);
202}
203
204static void plx_pci_write_reg(const struct sja1000_priv *priv, int port, u8 val)
205{
206 iowrite8(val, priv->reg_base + port);
207}
208
209/*
210 * Check if a CAN controller is present at the specified location
 211 * by trying to switch it from the BasicCAN mode into the PeliCAN mode.
212 * Also check states of some registers in reset mode.
213 */
214static inline int plx_pci_check_sja1000(const struct sja1000_priv *priv)
215{
216 int flag = 0;
217
218 /*
219 * Check registers after hardware reset (the Basic mode)
220 * See states on p. 10 of the Datasheet.
221 */
222 if ((priv->read_reg(priv, REG_CR) & REG_CR_BASICCAN_INITIAL_MASK) ==
223 REG_CR_BASICCAN_INITIAL &&
224 (priv->read_reg(priv, REG_SR) == REG_SR_BASICCAN_INITIAL) &&
225 (priv->read_reg(priv, REG_IR) == REG_IR_BASICCAN_INITIAL))
226 flag = 1;
227
228 /* Bring the SJA1000 into the PeliCAN mode*/
229 priv->write_reg(priv, REG_CDR, CDR_PELICAN);
230
231 /*
232 * Check registers after reset in the PeliCAN mode.
233 * See states on p. 23 of the Datasheet.
234 */
235 if (priv->read_reg(priv, REG_MOD) == REG_MOD_PELICAN_INITIAL &&
236 priv->read_reg(priv, REG_SR) == REG_SR_PELICAN_INITIAL &&
237 priv->read_reg(priv, REG_IR) == REG_IR_PELICAN_INITIAL)
238 return flag;
239
240 return 0;
241}
242
243/*
244 * PLX90xx software reset
 245 * It also asserts LRESET#, which resets devices on the Local Bus (if wired).
 246 * For most cards this is enough to reset the SJA1000 chips.
247 */
248static void plx_pci_reset_common(struct pci_dev *pdev)
249{
250 struct plx_pci_card *card = pci_get_drvdata(pdev);
251 u32 cntrl;
252
253 cntrl = ioread32(card->conf_addr + PLX_CNTRL);
254 cntrl |= PLX_PCI_RESET;
255 iowrite32(cntrl, card->conf_addr + PLX_CNTRL);
256 udelay(100);
257 cntrl ^= PLX_PCI_RESET;
258 iowrite32(cntrl, card->conf_addr + PLX_CNTRL);
259};
260
261/* Special reset function for Marathon card */
262static void plx_pci_reset_marathon(struct pci_dev *pdev)
263{
264 void __iomem *reset_addr;
265 int i;
266 int reset_bar[2] = {3, 5};
267
268 plx_pci_reset_common(pdev);
269
270 for (i = 0; i < 2; i++) {
271 reset_addr = pci_iomap(pdev, reset_bar[i], 0);
272 if (!reset_addr) {
273 dev_err(&pdev->dev, "Failed to remap reset "
274 "space %d (BAR%d)\n", i, reset_bar[i]);
275 } else {
276 /* reset the SJA1000 chip */
277 iowrite8(0x1, reset_addr);
278 udelay(100);
279 pci_iounmap(pdev, reset_addr);
280 }
281 }
282}
283
284static void plx_pci_del_card(struct pci_dev *pdev)
285{
286 struct plx_pci_card *card = pci_get_drvdata(pdev);
287 struct net_device *dev;
288 struct sja1000_priv *priv;
289 int i = 0;
290
291 for (i = 0; i < card->channels; i++) {
292 dev = card->net_dev[i];
293 if (!dev)
294 continue;
295
296 dev_info(&pdev->dev, "Removing %s\n", dev->name);
297 unregister_sja1000dev(dev);
298 priv = netdev_priv(dev);
299 if (priv->reg_base)
300 pci_iounmap(pdev, priv->reg_base);
301 free_sja1000dev(dev);
302 }
303
304 plx_pci_reset_common(pdev);
305
306 /*
307 * Disable interrupts from PCI-card (PLX90xx) and disable Local_1,
308 * Local_2 interrupts
309 */
310 iowrite32(0x0, card->conf_addr + PLX_INTCSR);
311
312 if (card->conf_addr)
313 pci_iounmap(pdev, card->conf_addr);
314
315 kfree(card);
316
317 pci_disable_device(pdev);
318 pci_set_drvdata(pdev, NULL);
319}
320
321/*
 322 * Probe a PLX90xx-based device for SJA1000 chips and register each
 323 * available CAN channel with the SJA1000 Socket-CAN subsystem.
324 */
325static int __devinit plx_pci_add_card(struct pci_dev *pdev,
326 const struct pci_device_id *ent)
327{
328 struct sja1000_priv *priv;
329 struct net_device *dev;
330 struct plx_pci_card *card;
331 struct plx_pci_card_info *ci;
332 int err, i;
333 u32 val;
334 void __iomem *addr;
335
336 ci = (struct plx_pci_card_info *)ent->driver_data;
337
338 if (pci_enable_device(pdev) < 0) {
339 dev_err(&pdev->dev, "Failed to enable PCI device\n");
340 return -ENODEV;
341 }
342
343 dev_info(&pdev->dev, "Detected \"%s\" card at slot #%i\n",
344 ci->name, PCI_SLOT(pdev->devfn));
345
346 /* Allocate card structures to hold addresses, ... */
347 card = kzalloc(sizeof(*card), GFP_KERNEL);
348 if (!card) {
349 dev_err(&pdev->dev, "Unable to allocate memory\n");
350 pci_disable_device(pdev);
351 return -ENOMEM;
352 }
353
354 pci_set_drvdata(pdev, card);
355
356 card->channels = 0;
357
358 /* Remap PLX90xx configuration space */
359 addr = pci_iomap(pdev, ci->conf_map.bar, ci->conf_map.size);
360 if (!addr) {
361 err = -ENOMEM;
362 dev_err(&pdev->dev, "Failed to remap configuration space "
363 "(BAR%d)\n", ci->conf_map.bar);
364 goto failure_cleanup;
365 }
366 card->conf_addr = addr + ci->conf_map.offset;
367
368 ci->reset_func(pdev);
369
370 /* Detect available channels */
371 for (i = 0; i < ci->channel_count; i++) {
372 struct plx_pci_channel_map *cm = &ci->chan_map_tbl[i];
373
374 dev = alloc_sja1000dev(0);
375 if (!dev) {
376 err = -ENOMEM;
377 goto failure_cleanup;
378 }
379
380 card->net_dev[i] = dev;
381 priv = netdev_priv(dev);
382 priv->priv = card;
383 priv->irq_flags = IRQF_SHARED;
384
385 dev->irq = pdev->irq;
386
387 /*
 388 * Remap the I/O space of the SJA1000 chips.
 389 * This mapping is device-dependent.
390 */
391 addr = pci_iomap(pdev, cm->bar, cm->size);
392 if (!addr) {
393 err = -ENOMEM;
394 dev_err(&pdev->dev, "Failed to remap BAR%d\n", cm->bar);
395 goto failure_cleanup;
396 }
397
398 priv->reg_base = addr + cm->offset;
399 priv->read_reg = plx_pci_read_reg;
400 priv->write_reg = plx_pci_write_reg;
401
402 /* Check if channel is present */
403 if (plx_pci_check_sja1000(priv)) {
404 priv->can.clock.freq = ci->can_clock;
405 priv->ocr = ci->ocr;
406 priv->cdr = ci->cdr;
407
408 SET_NETDEV_DEV(dev, &pdev->dev);
409
410 /* Register SJA1000 device */
411 err = register_sja1000dev(dev);
412 if (err) {
413 dev_err(&pdev->dev, "Registering device failed "
414 "(err=%d)\n", err);
415 free_sja1000dev(dev);
416 goto failure_cleanup;
417 }
418
419 card->channels++;
420
421 dev_info(&pdev->dev, "Channel #%d at 0x%p, irq %d "
422 "registered as %s\n", i + 1, priv->reg_base,
423 dev->irq, dev->name);
424 } else {
425 dev_err(&pdev->dev, "Channel #%d not detected\n",
426 i + 1);
427 free_sja1000dev(dev);
428 }
429 }
430
431 if (!card->channels) {
432 err = -ENODEV;
433 goto failure_cleanup;
434 }
435
436 /*
437 * Enable interrupts from PCI-card (PLX90xx) and enable Local_1,
438 * Local_2 interrupts from the SJA1000 chips
439 */
440 val = ioread32(card->conf_addr + PLX_INTCSR);
441 val |= PLX_LINT1_EN | PLX_LINT2_EN | PLX_PCI_INT_EN;
442 iowrite32(val, card->conf_addr + PLX_INTCSR);
443
444 return 0;
445
446failure_cleanup:
447 dev_err(&pdev->dev, "Error: %d. Cleaning Up.\n", err);
448
449 plx_pci_del_card(pdev);
450
451 return err;
452}
453
454static struct pci_driver plx_pci_driver = {
455 .name = DRV_NAME,
456 .id_table = plx_pci_tbl,
457 .probe = plx_pci_add_card,
458 .remove = plx_pci_del_card,
459};
460
461static int __init plx_pci_init(void)
462{
463 return pci_register_driver(&plx_pci_driver);
464}
465
466static void __exit plx_pci_exit(void)
467{
468 pci_unregister_driver(&plx_pci_driver);
469}
470
471module_init(plx_pci_init);
472module_exit(plx_pci_exit);
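Adding support for a further PLX90xx-based board would, hypothetically, follow the same pattern as the entries above: a plx_pci_card_info describing the clock, OCR/CDR values and BAR layout, plus a pci_device_id row whose driver_data points at it. A sketch with made-up names and IDs (everything below is illustrative, not part of the patch):

/* Hypothetical two-channel board behind a PLX9052 */
static struct plx_pci_card_info plx_pci_card_info_example __devinitdata = {
	"Example CAN-PCI", 2,
	PLX_PCI_CAN_CLOCK, PLX_PCI_OCR, PLX_PCI_CDR,
	{0, 0x00, 0x00}, { {2, 0x00, 0x80}, {2, 0x80, 0x80} },
	&plx_pci_reset_common
};

/* ...and a matching row in plx_pci_tbl, before the terminating { 0, }: */
	{
		0x1234, 0x5678,			/* made-up vendor/device IDs */
		PCI_ANY_ID, PCI_ANY_ID,
		0, 0,
		(kernel_ulong_t)&plx_pci_card_info_example
	},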
diff --git a/drivers/net/can/sja1000/sja1000.c b/drivers/net/can/sja1000/sja1000.c
index 542a4f7255b..ace103a4483 100644
--- a/drivers/net/can/sja1000/sja1000.c
+++ b/drivers/net/can/sja1000/sja1000.c
@@ -249,6 +249,9 @@ static netdev_tx_t sja1000_start_xmit(struct sk_buff *skb,
249 uint8_t dreg; 249 uint8_t dreg;
250 int i; 250 int i;
251 251
252 if (can_dropped_invalid_skb(dev, skb))
253 return NETDEV_TX_OK;
254
252 netif_stop_queue(dev); 255 netif_stop_queue(dev);
253 256
254 fi = dlc = cf->can_dlc; 257 fi = dlc = cf->can_dlc;
@@ -564,6 +567,7 @@ struct net_device *alloc_sja1000dev(int sizeof_priv)
564 priv->can.bittiming_const = &sja1000_bittiming_const; 567 priv->can.bittiming_const = &sja1000_bittiming_const;
565 priv->can.do_set_bittiming = sja1000_set_bittiming; 568 priv->can.do_set_bittiming = sja1000_set_bittiming;
566 priv->can.do_set_mode = sja1000_set_mode; 569 priv->can.do_set_mode = sja1000_set_mode;
570 priv->can.ctrlmode_supported = CAN_CTRLMODE_3_SAMPLES;
567 571
568 if (sizeof_priv) 572 if (sizeof_priv)
569 priv->priv = (void *)priv + sizeof(struct sja1000_priv); 573 priv->priv = (void *)priv + sizeof(struct sja1000_priv);
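The can_dropped_invalid_skb() check added here is the same one added to ti_hecc, ems_usb and vcan below. The helper (from include/linux/can/dev.h) frees and drops skbs that are not well-formed CAN frames, in which case the xmit handler must simply report NETDEV_TX_OK. A minimal sketch of the pattern, with a hypothetical driver name:

#include <linux/netdevice.h>
#include <linux/can.h>
#include <linux/can/dev.h>

static netdev_tx_t example_can_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct can_frame *cf = (struct can_frame *)skb->data;

	/* invalid frames are freed by the helper; just report OK */
	if (can_dropped_invalid_skb(dev, skb))
		return NETDEV_TX_OK;

	/* ... hand cf->can_dlc data bytes to the controller here ... */

	kfree_skb(skb);		/* real drivers typically use can_put_echo_skb() */
	return NETDEV_TX_OK;
}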
diff --git a/drivers/net/can/ti_hecc.c b/drivers/net/can/ti_hecc.c
index 5c993c2da52..8332e242b0b 100644
--- a/drivers/net/can/ti_hecc.c
+++ b/drivers/net/can/ti_hecc.c
@@ -477,6 +477,9 @@ static netdev_tx_t ti_hecc_xmit(struct sk_buff *skb, struct net_device *ndev)
477 u32 mbxno, mbx_mask, data; 477 u32 mbxno, mbx_mask, data;
478 unsigned long flags; 478 unsigned long flags;
479 479
480 if (can_dropped_invalid_skb(ndev, skb))
481 return NETDEV_TX_OK;
482
480 mbxno = get_tx_head_mb(priv); 483 mbxno = get_tx_head_mb(priv);
481 mbx_mask = BIT(mbxno); 484 mbx_mask = BIT(mbxno);
482 spin_lock_irqsave(&priv->mbx_lock, flags); 485 spin_lock_irqsave(&priv->mbx_lock, flags);
@@ -491,7 +494,6 @@ static netdev_tx_t ti_hecc_xmit(struct sk_buff *skb, struct net_device *ndev)
491 spin_unlock_irqrestore(&priv->mbx_lock, flags); 494 spin_unlock_irqrestore(&priv->mbx_lock, flags);
492 495
493 /* Prepare mailbox for transmission */ 496 /* Prepare mailbox for transmission */
494 data = min_t(u8, cf->can_dlc, 8);
495 if (cf->can_id & CAN_RTR_FLAG) /* Remote transmission request */ 497 if (cf->can_id & CAN_RTR_FLAG) /* Remote transmission request */
496 data |= HECC_CANMCF_RTR; 498 data |= HECC_CANMCF_RTR;
497 data |= get_tx_head_prio(priv) << 8; 499 data |= get_tx_head_prio(priv) << 8;
@@ -907,6 +909,7 @@ static int ti_hecc_probe(struct platform_device *pdev)
907 priv->can.bittiming_const = &ti_hecc_bittiming_const; 909 priv->can.bittiming_const = &ti_hecc_bittiming_const;
908 priv->can.do_set_mode = ti_hecc_do_set_mode; 910 priv->can.do_set_mode = ti_hecc_do_set_mode;
909 priv->can.do_get_state = ti_hecc_get_state; 911 priv->can.do_get_state = ti_hecc_get_state;
912 priv->can.ctrlmode_supported = CAN_CTRLMODE_3_SAMPLES;
910 913
911 ndev->irq = irq->start; 914 ndev->irq = irq->start;
912 ndev->flags |= IFF_ECHO; 915 ndev->flags |= IFF_ECHO;
diff --git a/drivers/net/can/usb/ems_usb.c b/drivers/net/can/usb/ems_usb.c
index efbb05c71bf..bfab283ba9b 100644
--- a/drivers/net/can/usb/ems_usb.c
+++ b/drivers/net/can/usb/ems_usb.c
@@ -767,6 +767,9 @@ static netdev_tx_t ems_usb_start_xmit(struct sk_buff *skb, struct net_device *ne
767 size_t size = CPC_HEADER_SIZE + CPC_MSG_HEADER_LEN 767 size_t size = CPC_HEADER_SIZE + CPC_MSG_HEADER_LEN
768 + sizeof(struct cpc_can_msg); 768 + sizeof(struct cpc_can_msg);
769 769
770 if (can_dropped_invalid_skb(netdev, skb))
771 return NETDEV_TX_OK;
772
770 /* create a URB, and a buffer for it, and copy the data to the URB */ 773 /* create a URB, and a buffer for it, and copy the data to the URB */
771 urb = usb_alloc_urb(0, GFP_ATOMIC); 774 urb = usb_alloc_urb(0, GFP_ATOMIC);
772 if (!urb) { 775 if (!urb) {
@@ -1019,6 +1022,7 @@ static int ems_usb_probe(struct usb_interface *intf,
1019 dev->can.bittiming_const = &ems_usb_bittiming_const; 1022 dev->can.bittiming_const = &ems_usb_bittiming_const;
1020 dev->can.do_set_bittiming = ems_usb_set_bittiming; 1023 dev->can.do_set_bittiming = ems_usb_set_bittiming;
1021 dev->can.do_set_mode = ems_usb_set_mode; 1024 dev->can.do_set_mode = ems_usb_set_mode;
1025 dev->can.ctrlmode_supported = CAN_CTRLMODE_3_SAMPLES;
1022 1026
1023 netdev->flags |= IFF_ECHO; /* we support local echo */ 1027 netdev->flags |= IFF_ECHO; /* we support local echo */
1024 1028
diff --git a/drivers/net/can/vcan.c b/drivers/net/can/vcan.c
index 80ac5631398..d124d837ae5 100644
--- a/drivers/net/can/vcan.c
+++ b/drivers/net/can/vcan.c
@@ -47,6 +47,7 @@
47#include <linux/if_arp.h> 47#include <linux/if_arp.h>
48#include <linux/if_ether.h> 48#include <linux/if_ether.h>
49#include <linux/can.h> 49#include <linux/can.h>
50#include <linux/can/dev.h>
50#include <net/rtnetlink.h> 51#include <net/rtnetlink.h>
51 52
52static __initdata const char banner[] = 53static __initdata const char banner[] =
@@ -70,10 +71,11 @@ MODULE_PARM_DESC(echo, "Echo sent frames (for testing). Default: 0 (Off)");
70 71
71static void vcan_rx(struct sk_buff *skb, struct net_device *dev) 72static void vcan_rx(struct sk_buff *skb, struct net_device *dev)
72{ 73{
74 struct can_frame *cf = (struct can_frame *)skb->data;
73 struct net_device_stats *stats = &dev->stats; 75 struct net_device_stats *stats = &dev->stats;
74 76
75 stats->rx_packets++; 77 stats->rx_packets++;
76 stats->rx_bytes += skb->len; 78 stats->rx_bytes += cf->can_dlc;
77 79
78 skb->protocol = htons(ETH_P_CAN); 80 skb->protocol = htons(ETH_P_CAN);
79 skb->pkt_type = PACKET_BROADCAST; 81 skb->pkt_type = PACKET_BROADCAST;
@@ -85,11 +87,15 @@ static void vcan_rx(struct sk_buff *skb, struct net_device *dev)
85 87
86static netdev_tx_t vcan_tx(struct sk_buff *skb, struct net_device *dev) 88static netdev_tx_t vcan_tx(struct sk_buff *skb, struct net_device *dev)
87{ 89{
90 struct can_frame *cf = (struct can_frame *)skb->data;
88 struct net_device_stats *stats = &dev->stats; 91 struct net_device_stats *stats = &dev->stats;
89 int loop; 92 int loop;
90 93
94 if (can_dropped_invalid_skb(dev, skb))
95 return NETDEV_TX_OK;
96
91 stats->tx_packets++; 97 stats->tx_packets++;
92 stats->tx_bytes += skb->len; 98 stats->tx_bytes += cf->can_dlc;
93 99
94 /* set flag whether this packet has to be looped back */ 100 /* set flag whether this packet has to be looped back */
95 loop = skb->pkt_type == PACKET_LOOPBACK; 101 loop = skb->pkt_type == PACKET_LOOPBACK;
@@ -103,7 +109,7 @@ static netdev_tx_t vcan_tx(struct sk_buff *skb, struct net_device *dev)
103 * CAN core already did the echo for us 109 * CAN core already did the echo for us
104 */ 110 */
105 stats->rx_packets++; 111 stats->rx_packets++;
106 stats->rx_bytes += skb->len; 112 stats->rx_bytes += cf->can_dlc;
107 } 113 }
108 kfree_skb(skb); 114 kfree_skb(skb);
109 return NETDEV_TX_OK; 115 return NETDEV_TX_OK;
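vcan now counts rx_bytes/tx_bytes using cf->can_dlc instead of skb->len. The reason is that a CAN skb always carries a full struct can_frame (16 bytes) no matter how many payload bytes are valid, so skb->len over-reports the traffic. A small sketch of the accounting this relies on; the structure layout matches include/linux/can.h of this era, and the helper name is hypothetical:

#include <linux/can.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>

/*
 * struct can_frame {
 *	canid_t can_id;  // 32-bit CAN ID + EFF/RTR/ERR flags
 *	__u8    can_dlc; // data length code: 0 .. 8
 *	__u8    data[8] __attribute__((aligned(8)));
 * };
 */
static void example_account_rx(struct net_device *dev, const struct sk_buff *skb)
{
	const struct can_frame *cf = (const struct can_frame *)skb->data;

	dev->stats.rx_packets++;
	dev->stats.rx_bytes += cf->can_dlc;	/* payload bytes, not skb->len */
}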
diff --git a/drivers/net/cassini.c b/drivers/net/cassini.c
index f857afe8e48..ad47e5126fd 100644
--- a/drivers/net/cassini.c
+++ b/drivers/net/cassini.c
@@ -106,7 +106,7 @@
106#define cas_page_unmap(x) kunmap_atomic((x), KM_SKB_DATA_SOFTIRQ) 106#define cas_page_unmap(x) kunmap_atomic((x), KM_SKB_DATA_SOFTIRQ)
107#define CAS_NCPUS num_online_cpus() 107#define CAS_NCPUS num_online_cpus()
108 108
109#if defined(CONFIG_CASSINI_NAPI) && defined(HAVE_NETDEV_POLL) 109#ifdef CONFIG_CASSINI_NAPI
110#define USE_NAPI 110#define USE_NAPI
111#define cas_skb_release(x) netif_receive_skb(x) 111#define cas_skb_release(x) netif_receive_skb(x)
112#else 112#else
@@ -236,7 +236,7 @@ static u16 link_modes[] __devinitdata = {
236 CAS_BMCR_SPEED1000|BMCR_FULLDPLX /* 5 : 1000bt full duplex */ 236 CAS_BMCR_SPEED1000|BMCR_FULLDPLX /* 5 : 1000bt full duplex */
237}; 237};
238 238
239static struct pci_device_id cas_pci_tbl[] __devinitdata = { 239static DEFINE_PCI_DEVICE_TABLE(cas_pci_tbl) = {
240 { PCI_VENDOR_ID_SUN, PCI_DEVICE_ID_SUN_CASSINI, 240 { PCI_VENDOR_ID_SUN, PCI_DEVICE_ID_SUN_CASSINI,
241 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, 241 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
242 { PCI_VENDOR_ID_NS, PCI_DEVICE_ID_NS_SATURN, 242 { PCI_VENDOR_ID_NS, PCI_DEVICE_ID_NS_SATURN,
diff --git a/drivers/net/chelsio/common.h b/drivers/net/chelsio/common.h
index 699d22c5fe0..f6462b54f82 100644
--- a/drivers/net/chelsio/common.h
+++ b/drivers/net/chelsio/common.h
@@ -334,7 +334,7 @@ static inline int t1_is_asic(const adapter_t *adapter)
334 return adapter->params.is_asic; 334 return adapter->params.is_asic;
335} 335}
336 336
337extern struct pci_device_id t1_pci_tbl[]; 337extern const struct pci_device_id t1_pci_tbl[];
338 338
339static inline int adapter_matches_type(const adapter_t *adapter, 339static inline int adapter_matches_type(const adapter_t *adapter,
340 int version, int revision) 340 int version, int revision)
diff --git a/drivers/net/chelsio/subr.c b/drivers/net/chelsio/subr.c
index 17720c6e5bf..2402d372c88 100644
--- a/drivers/net/chelsio/subr.c
+++ b/drivers/net/chelsio/subr.c
@@ -528,7 +528,7 @@ static const struct board_info t1_board[] = {
528 528
529}; 529};
530 530
531struct pci_device_id t1_pci_tbl[] = { 531DEFINE_PCI_DEVICE_TABLE(t1_pci_tbl) = {
532 CH_DEVICE(8, 0, CH_BRD_T110_1CU), 532 CH_DEVICE(8, 0, CH_BRD_T110_1CU),
533 CH_DEVICE(8, 1, CH_BRD_T110_1CU), 533 CH_DEVICE(8, 1, CH_BRD_T110_1CU),
534 CH_DEVICE(7, 0, CH_BRD_N110_1F), 534 CH_DEVICE(7, 0, CH_BRD_N110_1F),
diff --git a/drivers/net/cxgb3/cxgb3_main.c b/drivers/net/cxgb3/cxgb3_main.c
index 89bec9c3c14..73622f5312c 100644
--- a/drivers/net/cxgb3/cxgb3_main.c
+++ b/drivers/net/cxgb3/cxgb3_main.c
@@ -80,7 +80,7 @@ enum {
80#define CH_DEVICE(devid, idx) \ 80#define CH_DEVICE(devid, idx) \
81 { PCI_VENDOR_ID_CHELSIO, devid, PCI_ANY_ID, PCI_ANY_ID, 0, 0, idx } 81 { PCI_VENDOR_ID_CHELSIO, devid, PCI_ANY_ID, PCI_ANY_ID, 0, 0, idx }
82 82
83static const struct pci_device_id cxgb3_pci_tbl[] = { 83static DEFINE_PCI_DEVICE_TABLE(cxgb3_pci_tbl) = {
84 CH_DEVICE(0x20, 0), /* PE9000 */ 84 CH_DEVICE(0x20, 0), /* PE9000 */
85 CH_DEVICE(0x21, 1), /* T302E */ 85 CH_DEVICE(0x21, 1), /* T302E */
86 CH_DEVICE(0x22, 2), /* T310E */ 86 CH_DEVICE(0x22, 2), /* T310E */
diff --git a/drivers/net/cxgb3/cxgb3_offload.c b/drivers/net/cxgb3/cxgb3_offload.c
index 75064eea1d8..9498361119d 100644
--- a/drivers/net/cxgb3/cxgb3_offload.c
+++ b/drivers/net/cxgb3/cxgb3_offload.c
@@ -1252,7 +1252,7 @@ int cxgb3_offload_activate(struct adapter *adapter)
1252 struct mtutab mtutab; 1252 struct mtutab mtutab;
1253 unsigned int l2t_capacity; 1253 unsigned int l2t_capacity;
1254 1254
1255 t = kcalloc(1, sizeof(*t), GFP_KERNEL); 1255 t = kzalloc(sizeof(*t), GFP_KERNEL);
1256 if (!t) 1256 if (!t)
1257 return -ENOMEM; 1257 return -ENOMEM;
1258 1258
diff --git a/drivers/net/cxgb3/sge.c b/drivers/net/cxgb3/sge.c
index bdbd14727e4..4cf9b7962af 100644
--- a/drivers/net/cxgb3/sge.c
+++ b/drivers/net/cxgb3/sge.c
@@ -480,6 +480,7 @@ static inline void ring_fl_db(struct adapter *adap, struct sge_fl *q)
480{ 480{
481 if (q->pend_cred >= q->credits / 4) { 481 if (q->pend_cred >= q->credits / 4) {
482 q->pend_cred = 0; 482 q->pend_cred = 0;
483 wmb();
483 t3_write_reg(adap, A_SG_KDOORBELL, V_EGRCNTX(q->cntxt_id)); 484 t3_write_reg(adap, A_SG_KDOORBELL, V_EGRCNTX(q->cntxt_id));
484 } 485 }
485} 486}
@@ -2282,11 +2283,14 @@ static int process_responses(struct adapter *adap, struct sge_qset *qs,
2282 while (likely(budget_left && is_new_response(r, q))) { 2283 while (likely(budget_left && is_new_response(r, q))) {
2283 int packet_complete, eth, ethpad = 2, lro = qs->lro_enabled; 2284 int packet_complete, eth, ethpad = 2, lro = qs->lro_enabled;
2284 struct sk_buff *skb = NULL; 2285 struct sk_buff *skb = NULL;
2285 u32 len, flags = ntohl(r->flags); 2286 u32 len, flags;
2286 __be32 rss_hi = *(const __be32 *)r, 2287 __be32 rss_hi, rss_lo;
2287 rss_lo = r->rss_hdr.rss_hash_val;
2288 2288
2289 rmb();
2289 eth = r->rss_hdr.opcode == CPL_RX_PKT; 2290 eth = r->rss_hdr.opcode == CPL_RX_PKT;
2291 rss_hi = *(const __be32 *)r;
2292 rss_lo = r->rss_hdr.rss_hash_val;
2293 flags = ntohl(r->flags);
2290 2294
2291 if (unlikely(flags & F_RSPD_ASYNC_NOTIF)) { 2295 if (unlikely(flags & F_RSPD_ASYNC_NOTIF)) {
2292 skb = alloc_skb(AN_PKT_SIZE, GFP_ATOMIC); 2296 skb = alloc_skb(AN_PKT_SIZE, GFP_ATOMIC);
@@ -2497,7 +2501,10 @@ static int process_pure_responses(struct adapter *adap, struct sge_qset *qs,
2497 refill_rspq(adap, q, q->credits); 2501 refill_rspq(adap, q, q->credits);
2498 q->credits = 0; 2502 q->credits = 0;
2499 } 2503 }
2500 } while (is_new_response(r, q) && is_pure_response(r)); 2504 if (!is_new_response(r, q))
2505 break;
2506 rmb();
2507 } while (is_pure_response(r));
2501 2508
2502 if (sleeping) 2509 if (sleeping)
2503 check_ring_db(adap, qs, sleeping); 2510 check_ring_db(adap, qs, sleeping);
@@ -2531,6 +2538,7 @@ static inline int handle_responses(struct adapter *adap, struct sge_rspq *q)
2531 2538
2532 if (!is_new_response(r, q)) 2539 if (!is_new_response(r, q))
2533 return -1; 2540 return -1;
2541 rmb();
2534 if (is_pure_response(r) && process_pure_responses(adap, qs, r) == 0) { 2542 if (is_pure_response(r) && process_pure_responses(adap, qs, r) == 0) {
2535 t3_write_reg(adap, A_SG_GTS, V_RSPQ(q->cntxt_id) | 2543 t3_write_reg(adap, A_SG_GTS, V_RSPQ(q->cntxt_id) |
2536 V_NEWTIMER(q->holdoff_tmr) | V_NEWINDEX(q->cidx)); 2544 V_NEWTIMER(q->holdoff_tmr) | V_NEWINDEX(q->cidx));
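The wmb()/rmb() calls added to sge.c implement the usual ordering rules for rings shared with a DMA engine: descriptor and credit updates must be globally visible before the doorbell write that tells the hardware to look at them, and the fields of a response entry must not be read until after the check that the entry is actually new. A generic sketch of both sides, independent of the cxgb3 register layout (all names below are illustrative):

#include <linux/io.h>

struct example_rsp {
	u32 flags;		/* hardware sets the "new" bit last */
	u32 data;
};

/* Consumer side, mirroring the rmb() before reading r->flags and RSS fields: */
static int example_poll_one(const struct example_rsp *r, u32 *out)
{
	if (!(r->flags & 1))	/* no new response yet */
		return 0;
	rmb();			/* order the payload reads after the check */
	*out = r->data;
	return 1;
}

/* Producer side, mirroring the wmb() before the free-list doorbell: */
static void example_ring_doorbell(void __iomem *doorbell, u32 val)
{
	/* descriptors/credits were just written to coherent memory */
	wmb();			/* make them visible before the doorbell */
	writel(val, doorbell);
}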
diff --git a/drivers/net/defxx.c b/drivers/net/defxx.c
index 6a6ea038d7a..98da085445e 100644
--- a/drivers/net/defxx.c
+++ b/drivers/net/defxx.c
@@ -1052,12 +1052,9 @@ static int __devinit dfx_driver_init(struct net_device *dev,
1052 board_name = "DEFEA"; 1052 board_name = "DEFEA";
1053 if (dfx_bus_pci) 1053 if (dfx_bus_pci)
1054 board_name = "DEFPA"; 1054 board_name = "DEFPA";
1055 pr_info("%s: %s at %saddr = 0x%llx, IRQ = %d, " 1055 pr_info("%s: %s at %saddr = 0x%llx, IRQ = %d, Hardware addr = %pMF\n",
1056 "Hardware addr = %02X-%02X-%02X-%02X-%02X-%02X\n",
1057 print_name, board_name, dfx_use_mmio ? "" : "I/O ", 1056 print_name, board_name, dfx_use_mmio ? "" : "I/O ",
1058 (long long)bar_start, dev->irq, 1057 (long long)bar_start, dev->irq, dev->dev_addr);
1059 dev->dev_addr[0], dev->dev_addr[1], dev->dev_addr[2],
1060 dev->dev_addr[3], dev->dev_addr[4], dev->dev_addr[5]);
1061 1058
1062 /* 1059 /*
1063 * Get memory for descriptor block, consumer block, and other buffers 1060 * Get memory for descriptor block, consumer block, and other buffers
@@ -3631,7 +3628,7 @@ static int __devinit dfx_pci_register(struct pci_dev *,
3631 const struct pci_device_id *); 3628 const struct pci_device_id *);
3632static void __devexit dfx_pci_unregister(struct pci_dev *); 3629static void __devexit dfx_pci_unregister(struct pci_dev *);
3633 3630
3634static struct pci_device_id dfx_pci_table[] = { 3631static DEFINE_PCI_DEVICE_TABLE(dfx_pci_table) = {
3635 { PCI_DEVICE(PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_FDDI) }, 3632 { PCI_DEVICE(PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_FDDI) },
3636 { } 3633 { }
3637}; 3634};
diff --git a/drivers/net/dl2k.h b/drivers/net/dl2k.h
index 266ec8777ca..7caab3d26a9 100644
--- a/drivers/net/dl2k.h
+++ b/drivers/net/dl2k.h
@@ -537,7 +537,7 @@ struct netdev_private {
537 driver_data Data private to the driver. 537 driver_data Data private to the driver.
538*/ 538*/
539 539
540static const struct pci_device_id rio_pci_tbl[] = { 540static DEFINE_PCI_DEVICE_TABLE(rio_pci_tbl) = {
541 {0x1186, 0x4000, PCI_ANY_ID, PCI_ANY_ID, }, 541 {0x1186, 0x4000, PCI_ANY_ID, PCI_ANY_ID, },
542 {0x13f0, 0x1021, PCI_ANY_ID, PCI_ANY_ID, }, 542 {0x13f0, 0x1021, PCI_ANY_ID, PCI_ANY_ID, },
543 { } 543 { }
diff --git a/drivers/net/e100.c b/drivers/net/e100.c
index 839fb2b136d..5c7a155e849 100644
--- a/drivers/net/e100.c
+++ b/drivers/net/e100.c
@@ -208,7 +208,7 @@ MODULE_PARM_DESC(use_io, "Force use of i/o access mode");
208#define INTEL_8255X_ETHERNET_DEVICE(device_id, ich) {\ 208#define INTEL_8255X_ETHERNET_DEVICE(device_id, ich) {\
209 PCI_VENDOR_ID_INTEL, device_id, PCI_ANY_ID, PCI_ANY_ID, \ 209 PCI_VENDOR_ID_INTEL, device_id, PCI_ANY_ID, PCI_ANY_ID, \
210 PCI_CLASS_NETWORK_ETHERNET << 8, 0xFFFF00, ich } 210 PCI_CLASS_NETWORK_ETHERNET << 8, 0xFFFF00, ich }
211static struct pci_device_id e100_id_table[] = { 211static DEFINE_PCI_DEVICE_TABLE(e100_id_table) = {
212 INTEL_8255X_ETHERNET_DEVICE(0x1029, 0), 212 INTEL_8255X_ETHERNET_DEVICE(0x1029, 0),
213 INTEL_8255X_ETHERNET_DEVICE(0x1030, 0), 213 INTEL_8255X_ETHERNET_DEVICE(0x1030, 0),
214 INTEL_8255X_ETHERNET_DEVICE(0x1031, 3), 214 INTEL_8255X_ETHERNET_DEVICE(0x1031, 3),
diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c
index d29bb532ecc..b608528f26f 100644
--- a/drivers/net/e1000/e1000_main.c
+++ b/drivers/net/e1000/e1000_main.c
@@ -42,7 +42,7 @@ static const char e1000_copyright[] = "Copyright (c) 1999-2006 Intel Corporation
42 * Macro expands to... 42 * Macro expands to...
43 * {PCI_DEVICE(PCI_VENDOR_ID_INTEL, device_id)} 43 * {PCI_DEVICE(PCI_VENDOR_ID_INTEL, device_id)}
44 */ 44 */
45static struct pci_device_id e1000_pci_tbl[] = { 45static DEFINE_PCI_DEVICE_TABLE(e1000_pci_tbl) = {
46 INTEL_E1000_ETHERNET_DEVICE(0x1000), 46 INTEL_E1000_ETHERNET_DEVICE(0x1000),
47 INTEL_E1000_ETHERNET_DEVICE(0x1001), 47 INTEL_E1000_ETHERNET_DEVICE(0x1001),
48 INTEL_E1000_ETHERNET_DEVICE(0x1004), 48 INTEL_E1000_ETHERNET_DEVICE(0x1004),
@@ -2127,7 +2127,7 @@ static void e1000_set_rx_mode(struct net_device *netdev)
2127 rctl |= E1000_RCTL_VFE; 2127 rctl |= E1000_RCTL_VFE;
2128 } 2128 }
2129 2129
2130 if (netdev->uc.count > rar_entries - 1) { 2130 if (netdev_uc_count(netdev) > rar_entries - 1) {
2131 rctl |= E1000_RCTL_UPE; 2131 rctl |= E1000_RCTL_UPE;
2132 } else if (!(netdev->flags & IFF_PROMISC)) { 2132 } else if (!(netdev->flags & IFF_PROMISC)) {
2133 rctl &= ~E1000_RCTL_UPE; 2133 rctl &= ~E1000_RCTL_UPE;
@@ -2150,7 +2150,7 @@ static void e1000_set_rx_mode(struct net_device *netdev)
2150 */ 2150 */
2151 i = 1; 2151 i = 1;
2152 if (use_uc) 2152 if (use_uc)
2153 list_for_each_entry(ha, &netdev->uc.list, list) { 2153 netdev_for_each_uc_addr(ha, netdev) {
2154 if (i == rar_entries) 2154 if (i == rar_entries)
2155 break; 2155 break;
2156 e1000_rar_set(hw, ha->addr, i++); 2156 e1000_rar_set(hw, ha->addr, i++);
diff --git a/drivers/net/e1000e/82571.c b/drivers/net/e1000e/82571.c
index 02d67d047d9..3c95acb3a87 100644
--- a/drivers/net/e1000e/82571.c
+++ b/drivers/net/e1000e/82571.c
@@ -267,8 +267,14 @@ static s32 e1000_init_mac_params_82571(struct e1000_adapter *adapter)
267 } 267 }
268 268
269 switch (hw->mac.type) { 269 switch (hw->mac.type) {
270 case e1000_82573:
271 func->set_lan_id = e1000_set_lan_id_single_port;
272 func->check_mng_mode = e1000e_check_mng_mode_generic;
273 func->led_on = e1000e_led_on_generic;
274 break;
270 case e1000_82574: 275 case e1000_82574:
271 case e1000_82583: 276 case e1000_82583:
277 func->set_lan_id = e1000_set_lan_id_single_port;
272 func->check_mng_mode = e1000_check_mng_mode_82574; 278 func->check_mng_mode = e1000_check_mng_mode_82574;
273 func->led_on = e1000_led_on_82574; 279 func->led_on = e1000_led_on_82574;
274 break; 280 break;
@@ -922,9 +928,12 @@ static s32 e1000_reset_hw_82571(struct e1000_hw *hw)
922 ew32(IMC, 0xffffffff); 928 ew32(IMC, 0xffffffff);
923 icr = er32(ICR); 929 icr = er32(ICR);
924 930
925 if (hw->mac.type == e1000_82571 && 931 /* Install any alternate MAC address into RAR0 */
926 hw->dev_spec.e82571.alt_mac_addr_is_present) 932 ret_val = e1000_check_alt_mac_addr_generic(hw);
927 e1000e_set_laa_state_82571(hw, true); 933 if (ret_val)
934 return ret_val;
935
936 e1000e_set_laa_state_82571(hw, true);
928 937
929 /* Reinitialize the 82571 serdes link state machine */ 938 /* Reinitialize the 82571 serdes link state machine */
930 if (hw->phy.media_type == e1000_media_type_internal_serdes) 939 if (hw->phy.media_type == e1000_media_type_internal_serdes)
@@ -1225,32 +1234,6 @@ static s32 e1000_led_on_82574(struct e1000_hw *hw)
1225} 1234}
1226 1235
1227/** 1236/**
1228 * e1000_update_mc_addr_list_82571 - Update Multicast addresses
1229 * @hw: pointer to the HW structure
1230 * @mc_addr_list: array of multicast addresses to program
1231 * @mc_addr_count: number of multicast addresses to program
1232 * @rar_used_count: the first RAR register free to program
1233 * @rar_count: total number of supported Receive Address Registers
1234 *
1235 * Updates the Receive Address Registers and Multicast Table Array.
1236 * The caller must have a packed mc_addr_list of multicast addresses.
1237 * The parameter rar_count will usually be hw->mac.rar_entry_count
1238 * unless there are workarounds that change this.
1239 **/
1240static void e1000_update_mc_addr_list_82571(struct e1000_hw *hw,
1241 u8 *mc_addr_list,
1242 u32 mc_addr_count,
1243 u32 rar_used_count,
1244 u32 rar_count)
1245{
1246 if (e1000e_get_laa_state_82571(hw))
1247 rar_count--;
1248
1249 e1000e_update_mc_addr_list_generic(hw, mc_addr_list, mc_addr_count,
1250 rar_used_count, rar_count);
1251}
1252
1253/**
1254 * e1000_setup_link_82571 - Setup flow control and link settings 1237 * e1000_setup_link_82571 - Setup flow control and link settings
1255 * @hw: pointer to the HW structure 1238 * @hw: pointer to the HW structure
1256 * 1239 *
@@ -1621,6 +1604,29 @@ static s32 e1000_fix_nvm_checksum_82571(struct e1000_hw *hw)
1621} 1604}
1622 1605
1623/** 1606/**
1607 * e1000_read_mac_addr_82571 - Read device MAC address
1608 * @hw: pointer to the HW structure
1609 **/
1610static s32 e1000_read_mac_addr_82571(struct e1000_hw *hw)
1611{
1612 s32 ret_val = 0;
1613
1614 /*
1615 * If there's an alternate MAC address place it in RAR0
1616 * so that it will override the Si installed default perm
1617 * address.
1618 */
1619 ret_val = e1000_check_alt_mac_addr_generic(hw);
1620 if (ret_val)
1621 goto out;
1622
1623 ret_val = e1000_read_mac_addr_generic(hw);
1624
1625out:
1626 return ret_val;
1627}
1628
1629/**
1624 * e1000_power_down_phy_copper_82571 - Remove link during PHY power down 1630 * e1000_power_down_phy_copper_82571 - Remove link during PHY power down
1625 * @hw: pointer to the HW structure 1631 * @hw: pointer to the HW structure
1626 * 1632 *
@@ -1695,10 +1701,11 @@ static struct e1000_mac_operations e82571_mac_ops = {
1695 .cleanup_led = e1000e_cleanup_led_generic, 1701 .cleanup_led = e1000e_cleanup_led_generic,
1696 .clear_hw_cntrs = e1000_clear_hw_cntrs_82571, 1702 .clear_hw_cntrs = e1000_clear_hw_cntrs_82571,
1697 .get_bus_info = e1000e_get_bus_info_pcie, 1703 .get_bus_info = e1000e_get_bus_info_pcie,
1704 .set_lan_id = e1000_set_lan_id_multi_port_pcie,
1698 /* .get_link_up_info: media type dependent */ 1705 /* .get_link_up_info: media type dependent */
1699 /* .led_on: mac type dependent */ 1706 /* .led_on: mac type dependent */
1700 .led_off = e1000e_led_off_generic, 1707 .led_off = e1000e_led_off_generic,
1701 .update_mc_addr_list = e1000_update_mc_addr_list_82571, 1708 .update_mc_addr_list = e1000e_update_mc_addr_list_generic,
1702 .write_vfta = e1000_write_vfta_generic, 1709 .write_vfta = e1000_write_vfta_generic,
1703 .clear_vfta = e1000_clear_vfta_82571, 1710 .clear_vfta = e1000_clear_vfta_82571,
1704 .reset_hw = e1000_reset_hw_82571, 1711 .reset_hw = e1000_reset_hw_82571,
@@ -1706,6 +1713,7 @@ static struct e1000_mac_operations e82571_mac_ops = {
1706 .setup_link = e1000_setup_link_82571, 1713 .setup_link = e1000_setup_link_82571,
1707 /* .setup_physical_interface: media type dependent */ 1714 /* .setup_physical_interface: media type dependent */
1708 .setup_led = e1000e_setup_led_generic, 1715 .setup_led = e1000e_setup_led_generic,
1716 .read_mac_addr = e1000_read_mac_addr_82571,
1709}; 1717};
1710 1718
1711static struct e1000_phy_operations e82_phy_ops_igp = { 1719static struct e1000_phy_operations e82_phy_ops_igp = {
diff --git a/drivers/net/e1000e/defines.h b/drivers/net/e1000e/defines.h
index e02e38221ed..db05ec35574 100644
--- a/drivers/net/e1000e/defines.h
+++ b/drivers/net/e1000e/defines.h
@@ -460,6 +460,8 @@
460 */ 460 */
461#define E1000_RAR_ENTRIES 15 461#define E1000_RAR_ENTRIES 15
462#define E1000_RAH_AV 0x80000000 /* Receive descriptor valid */ 462#define E1000_RAH_AV 0x80000000 /* Receive descriptor valid */
463#define E1000_RAL_MAC_ADDR_LEN 4
464#define E1000_RAH_MAC_ADDR_LEN 2
463 465
464/* Error Codes */ 466/* Error Codes */
465#define E1000_ERR_NVM 1 467#define E1000_ERR_NVM 1
diff --git a/drivers/net/e1000e/e1000.h b/drivers/net/e1000e/e1000.h
index d236efaf747..318bdb28a7c 100644
--- a/drivers/net/e1000e/e1000.h
+++ b/drivers/net/e1000e/e1000.h
@@ -503,6 +503,8 @@ extern s32 e1000e_cleanup_led_generic(struct e1000_hw *hw);
503extern s32 e1000e_led_on_generic(struct e1000_hw *hw); 503extern s32 e1000e_led_on_generic(struct e1000_hw *hw);
504extern s32 e1000e_led_off_generic(struct e1000_hw *hw); 504extern s32 e1000e_led_off_generic(struct e1000_hw *hw);
505extern s32 e1000e_get_bus_info_pcie(struct e1000_hw *hw); 505extern s32 e1000e_get_bus_info_pcie(struct e1000_hw *hw);
506extern void e1000_set_lan_id_multi_port_pcie(struct e1000_hw *hw);
507extern void e1000_set_lan_id_single_port(struct e1000_hw *hw);
506extern s32 e1000e_get_speed_and_duplex_copper(struct e1000_hw *hw, u16 *speed, u16 *duplex); 508extern s32 e1000e_get_speed_and_duplex_copper(struct e1000_hw *hw, u16 *speed, u16 *duplex);
507extern s32 e1000e_get_speed_and_duplex_fiber_serdes(struct e1000_hw *hw, u16 *speed, u16 *duplex); 509extern s32 e1000e_get_speed_and_duplex_fiber_serdes(struct e1000_hw *hw, u16 *speed, u16 *duplex);
508extern s32 e1000e_disable_pcie_master(struct e1000_hw *hw); 510extern s32 e1000e_disable_pcie_master(struct e1000_hw *hw);
@@ -517,9 +519,7 @@ extern void e1000_clear_vfta_generic(struct e1000_hw *hw);
517extern void e1000e_init_rx_addrs(struct e1000_hw *hw, u16 rar_count); 519extern void e1000e_init_rx_addrs(struct e1000_hw *hw, u16 rar_count);
518extern void e1000e_update_mc_addr_list_generic(struct e1000_hw *hw, 520extern void e1000e_update_mc_addr_list_generic(struct e1000_hw *hw,
519 u8 *mc_addr_list, 521 u8 *mc_addr_list,
520 u32 mc_addr_count, 522 u32 mc_addr_count);
521 u32 rar_used_count,
522 u32 rar_count);
523extern void e1000e_rar_set(struct e1000_hw *hw, u8 *addr, u32 index); 523extern void e1000e_rar_set(struct e1000_hw *hw, u8 *addr, u32 index);
524extern s32 e1000e_set_fc_watermarks(struct e1000_hw *hw); 524extern s32 e1000e_set_fc_watermarks(struct e1000_hw *hw);
525extern void e1000e_set_pcie_no_snoop(struct e1000_hw *hw, u32 no_snoop); 525extern void e1000e_set_pcie_no_snoop(struct e1000_hw *hw, u32 no_snoop);
@@ -530,6 +530,7 @@ extern s32 e1000e_config_fc_after_link_up(struct e1000_hw *hw);
530extern s32 e1000e_force_mac_fc(struct e1000_hw *hw); 530extern s32 e1000e_force_mac_fc(struct e1000_hw *hw);
531extern s32 e1000e_blink_led(struct e1000_hw *hw); 531extern s32 e1000e_blink_led(struct e1000_hw *hw);
532extern void e1000_write_vfta_generic(struct e1000_hw *hw, u32 offset, u32 value); 532extern void e1000_write_vfta_generic(struct e1000_hw *hw, u32 offset, u32 value);
533extern s32 e1000_check_alt_mac_addr_generic(struct e1000_hw *hw);
533extern void e1000e_reset_adaptive(struct e1000_hw *hw); 534extern void e1000e_reset_adaptive(struct e1000_hw *hw);
534extern void e1000e_update_adaptive(struct e1000_hw *hw); 535extern void e1000e_update_adaptive(struct e1000_hw *hw);
535 536
@@ -629,7 +630,15 @@ extern s32 e1000e_read_nvm_eerd(struct e1000_hw *hw, u16 offset, u16 words, u16
629extern s32 e1000e_validate_nvm_checksum_generic(struct e1000_hw *hw); 630extern s32 e1000e_validate_nvm_checksum_generic(struct e1000_hw *hw);
630extern void e1000e_release_nvm(struct e1000_hw *hw); 631extern void e1000e_release_nvm(struct e1000_hw *hw);
631extern void e1000e_reload_nvm(struct e1000_hw *hw); 632extern void e1000e_reload_nvm(struct e1000_hw *hw);
632extern s32 e1000e_read_mac_addr(struct e1000_hw *hw); 633extern s32 e1000_read_mac_addr_generic(struct e1000_hw *hw);
634
635static inline s32 e1000e_read_mac_addr(struct e1000_hw *hw)
636{
637 if (hw->mac.ops.read_mac_addr)
638 return hw->mac.ops.read_mac_addr(hw);
639
640 return e1000_read_mac_addr_generic(hw);
641}
633 642
634static inline s32 e1000_validate_nvm_checksum(struct e1000_hw *hw) 643static inline s32 e1000_validate_nvm_checksum(struct e1000_hw *hw)
635{ 644{
diff --git a/drivers/net/e1000e/es2lan.c b/drivers/net/e1000e/es2lan.c
index e2aa3b78856..27d21589a69 100644
--- a/drivers/net/e1000e/es2lan.c
+++ b/drivers/net/e1000e/es2lan.c
@@ -246,6 +246,9 @@ static s32 e1000_init_mac_params_80003es2lan(struct e1000_adapter *adapter)
246 break; 246 break;
247 } 247 }
248 248
249 /* set lan id for port to determine which phy lock to use */
250 hw->mac.ops.set_lan_id(hw);
251
249 return 0; 252 return 0;
250} 253}
251 254
@@ -814,7 +817,9 @@ static s32 e1000_reset_hw_80003es2lan(struct e1000_hw *hw)
814 ew32(IMC, 0xffffffff); 817 ew32(IMC, 0xffffffff);
815 icr = er32(ICR); 818 icr = er32(ICR);
816 819
817 return 0; 820 ret_val = e1000_check_alt_mac_addr_generic(hw);
821
822 return ret_val;
818} 823}
819 824
820/** 825/**
@@ -1340,6 +1345,29 @@ static s32 e1000_write_kmrn_reg_80003es2lan(struct e1000_hw *hw, u32 offset,
1340} 1345}
1341 1346
1342/** 1347/**
1348 * e1000_read_mac_addr_80003es2lan - Read device MAC address
1349 * @hw: pointer to the HW structure
1350 **/
1351static s32 e1000_read_mac_addr_80003es2lan(struct e1000_hw *hw)
1352{
1353 s32 ret_val = 0;
1354
1355 /*
1356 * If there's an alternate MAC address place it in RAR0
1357 * so that it will override the Si installed default perm
1358 * address.
1359 */
1360 ret_val = e1000_check_alt_mac_addr_generic(hw);
1361 if (ret_val)
1362 goto out;
1363
1364 ret_val = e1000_read_mac_addr_generic(hw);
1365
1366out:
1367 return ret_val;
1368}
1369
1370/**
1343 * e1000_power_down_phy_copper_80003es2lan - Remove link during PHY power down 1371 * e1000_power_down_phy_copper_80003es2lan - Remove link during PHY power down
1344 * @hw: pointer to the HW structure 1372 * @hw: pointer to the HW structure
1345 * 1373 *
@@ -1403,12 +1431,14 @@ static void e1000_clear_hw_cntrs_80003es2lan(struct e1000_hw *hw)
1403} 1431}
1404 1432
1405static struct e1000_mac_operations es2_mac_ops = { 1433static struct e1000_mac_operations es2_mac_ops = {
1434 .read_mac_addr = e1000_read_mac_addr_80003es2lan,
1406 .id_led_init = e1000e_id_led_init, 1435 .id_led_init = e1000e_id_led_init,
1407 .check_mng_mode = e1000e_check_mng_mode_generic, 1436 .check_mng_mode = e1000e_check_mng_mode_generic,
1408 /* check_for_link dependent on media type */ 1437 /* check_for_link dependent on media type */
1409 .cleanup_led = e1000e_cleanup_led_generic, 1438 .cleanup_led = e1000e_cleanup_led_generic,
1410 .clear_hw_cntrs = e1000_clear_hw_cntrs_80003es2lan, 1439 .clear_hw_cntrs = e1000_clear_hw_cntrs_80003es2lan,
1411 .get_bus_info = e1000e_get_bus_info_pcie, 1440 .get_bus_info = e1000e_get_bus_info_pcie,
1441 .set_lan_id = e1000_set_lan_id_multi_port_pcie,
1412 .get_link_up_info = e1000_get_link_up_info_80003es2lan, 1442 .get_link_up_info = e1000_get_link_up_info_80003es2lan,
1413 .led_on = e1000e_led_on_generic, 1443 .led_on = e1000e_led_on_generic,
1414 .led_off = e1000e_led_off_generic, 1444 .led_off = e1000e_led_off_generic,
diff --git a/drivers/net/e1000e/hw.h b/drivers/net/e1000e/hw.h
index eccf29b75c4..8bdcd5f24ef 100644
--- a/drivers/net/e1000e/hw.h
+++ b/drivers/net/e1000e/hw.h
@@ -389,6 +389,9 @@ enum e1e_registers {
389 389
390#define E1000_FUNC_1 1 390#define E1000_FUNC_1 1
391 391
392#define E1000_ALT_MAC_ADDRESS_OFFSET_LAN0 0
393#define E1000_ALT_MAC_ADDRESS_OFFSET_LAN1 3
394
392enum e1000_mac_type { 395enum e1000_mac_type {
393 e1000_82571, 396 e1000_82571,
394 e1000_82572, 397 e1000_82572,
@@ -746,16 +749,18 @@ struct e1000_mac_operations {
746 void (*clear_hw_cntrs)(struct e1000_hw *); 749 void (*clear_hw_cntrs)(struct e1000_hw *);
747 void (*clear_vfta)(struct e1000_hw *); 750 void (*clear_vfta)(struct e1000_hw *);
748 s32 (*get_bus_info)(struct e1000_hw *); 751 s32 (*get_bus_info)(struct e1000_hw *);
752 void (*set_lan_id)(struct e1000_hw *);
749 s32 (*get_link_up_info)(struct e1000_hw *, u16 *, u16 *); 753 s32 (*get_link_up_info)(struct e1000_hw *, u16 *, u16 *);
750 s32 (*led_on)(struct e1000_hw *); 754 s32 (*led_on)(struct e1000_hw *);
751 s32 (*led_off)(struct e1000_hw *); 755 s32 (*led_off)(struct e1000_hw *);
752 void (*update_mc_addr_list)(struct e1000_hw *, u8 *, u32, u32, u32); 756 void (*update_mc_addr_list)(struct e1000_hw *, u8 *, u32);
753 s32 (*reset_hw)(struct e1000_hw *); 757 s32 (*reset_hw)(struct e1000_hw *);
754 s32 (*init_hw)(struct e1000_hw *); 758 s32 (*init_hw)(struct e1000_hw *);
755 s32 (*setup_link)(struct e1000_hw *); 759 s32 (*setup_link)(struct e1000_hw *);
756 s32 (*setup_physical_interface)(struct e1000_hw *); 760 s32 (*setup_physical_interface)(struct e1000_hw *);
757 s32 (*setup_led)(struct e1000_hw *); 761 s32 (*setup_led)(struct e1000_hw *);
758 void (*write_vfta)(struct e1000_hw *, u32, u32); 762 void (*write_vfta)(struct e1000_hw *, u32, u32);
763 s32 (*read_mac_addr)(struct e1000_hw *);
759}; 764};
760 765
761/* Function pointers for the PHY. */ 766/* Function pointers for the PHY. */
@@ -814,6 +819,10 @@ struct e1000_mac_info {
814 u16 ifs_ratio; 819 u16 ifs_ratio;
815 u16 ifs_step_size; 820 u16 ifs_step_size;
816 u16 mta_reg_count; 821 u16 mta_reg_count;
822
823 /* Maximum size of the MTA register table in all supported adapters */
824 #define MAX_MTA_REG 128
825 u32 mta_shadow[MAX_MTA_REG];
817 u16 rar_entry_count; 826 u16 rar_entry_count;
818 827
819 u8 forced_speed_duplex; 828 u8 forced_speed_duplex;
@@ -897,7 +906,6 @@ struct e1000_fc_info {
897 906
898struct e1000_dev_spec_82571 { 907struct e1000_dev_spec_82571 {
899 bool laa_is_present; 908 bool laa_is_present;
900 bool alt_mac_addr_is_present;
901 u32 smb_counter; 909 u32 smb_counter;
902}; 910};
903 911
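The new mta_shadow[MAX_MTA_REG] array in e1000_mac_info lets the generic multicast update (see lib.c below) rebuild the whole Multicast Table Array in memory and write it back in one pass, instead of allocating a temporary array on every update. A worked example of the hash-to-bit mapping it uses, assuming mta_reg_count == 128 (so the hash is effectively 12 bits: 7 for the register index, 5 for the bit):

	u32 hash_value = 0x2A5;				/* example hash of an address */
	u32 hash_reg = (hash_value >> 5) & (128 - 1);	/* -> MTA register 21 */
	u32 hash_bit = hash_value & 0x1F;		/* -> bit 5 */

	hw->mac.mta_shadow[hash_reg] |= 1 << hash_bit;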
diff --git a/drivers/net/e1000e/ich8lan.c b/drivers/net/e1000e/ich8lan.c
index 8b6ecd12788..54d03a0ce3c 100644
--- a/drivers/net/e1000e/ich8lan.c
+++ b/drivers/net/e1000e/ich8lan.c
@@ -3368,6 +3368,7 @@ static struct e1000_mac_operations ich8_mac_ops = {
3368 /* cleanup_led dependent on mac type */ 3368 /* cleanup_led dependent on mac type */
3369 .clear_hw_cntrs = e1000_clear_hw_cntrs_ich8lan, 3369 .clear_hw_cntrs = e1000_clear_hw_cntrs_ich8lan,
3370 .get_bus_info = e1000_get_bus_info_ich8lan, 3370 .get_bus_info = e1000_get_bus_info_ich8lan,
3371 .set_lan_id = e1000_set_lan_id_single_port,
3371 .get_link_up_info = e1000_get_link_up_info_ich8lan, 3372 .get_link_up_info = e1000_get_link_up_info_ich8lan,
3372 /* led_on dependent on mac type */ 3373 /* led_on dependent on mac type */
3373 /* led_off dependent on mac type */ 3374 /* led_off dependent on mac type */
diff --git a/drivers/net/e1000e/lib.c b/drivers/net/e1000e/lib.c
index 2fa9b36a2c5..2425ed11d5c 100644
--- a/drivers/net/e1000e/lib.c
+++ b/drivers/net/e1000e/lib.c
@@ -51,10 +51,10 @@ enum e1000_mng_mode {
51 **/ 51 **/
52s32 e1000e_get_bus_info_pcie(struct e1000_hw *hw) 52s32 e1000e_get_bus_info_pcie(struct e1000_hw *hw)
53{ 53{
54 struct e1000_mac_info *mac = &hw->mac;
54 struct e1000_bus_info *bus = &hw->bus; 55 struct e1000_bus_info *bus = &hw->bus;
55 struct e1000_adapter *adapter = hw->adapter; 56 struct e1000_adapter *adapter = hw->adapter;
56 u32 status; 57 u16 pcie_link_status, cap_offset;
57 u16 pcie_link_status, pci_header_type, cap_offset;
58 58
59 cap_offset = pci_find_capability(adapter->pdev, PCI_CAP_ID_EXP); 59 cap_offset = pci_find_capability(adapter->pdev, PCI_CAP_ID_EXP);
60 if (!cap_offset) { 60 if (!cap_offset) {
@@ -68,20 +68,46 @@ s32 e1000e_get_bus_info_pcie(struct e1000_hw *hw)
68 PCIE_LINK_WIDTH_SHIFT); 68 PCIE_LINK_WIDTH_SHIFT);
69 } 69 }
70 70
71 pci_read_config_word(adapter->pdev, PCI_HEADER_TYPE_REGISTER, 71 mac->ops.set_lan_id(hw);
72 &pci_header_type);
73 if (pci_header_type & PCI_HEADER_TYPE_MULTIFUNC) {
74 status = er32(STATUS);
75 bus->func = (status & E1000_STATUS_FUNC_MASK)
76 >> E1000_STATUS_FUNC_SHIFT;
77 } else {
78 bus->func = 0;
79 }
80 72
81 return 0; 73 return 0;
82} 74}
83 75
84/** 76/**
77 * e1000_set_lan_id_multi_port_pcie - Set LAN id for PCIe multiple port devices
78 *
79 * @hw: pointer to the HW structure
80 *
81 * Determines the LAN function id by reading memory-mapped registers
82 * and swaps the port value if requested.
83 **/
84void e1000_set_lan_id_multi_port_pcie(struct e1000_hw *hw)
85{
86 struct e1000_bus_info *bus = &hw->bus;
87 u32 reg;
88
89 /*
90 * The status register reports the correct function number
91 * for the device regardless of function swap state.
92 */
93 reg = er32(STATUS);
94 bus->func = (reg & E1000_STATUS_FUNC_MASK) >> E1000_STATUS_FUNC_SHIFT;
95}
96
97/**
98 * e1000_set_lan_id_single_port - Set LAN id for a single port device
99 * @hw: pointer to the HW structure
100 *
101 * Sets the LAN function id to zero for a single port device.
102 **/
103void e1000_set_lan_id_single_port(struct e1000_hw *hw)
104{
105 struct e1000_bus_info *bus = &hw->bus;
106
107 bus->func = 0;
108}
109
110/**
85 * e1000_clear_vfta_generic - Clear VLAN filter table 111 * e1000_clear_vfta_generic - Clear VLAN filter table
86 * @hw: pointer to the HW structure 112 * @hw: pointer to the HW structure
87 * 113 *
@@ -139,6 +165,68 @@ void e1000e_init_rx_addrs(struct e1000_hw *hw, u16 rar_count)
139} 165}
140 166
141/** 167/**
168 * e1000_check_alt_mac_addr_generic - Check for alternate MAC addr
169 * @hw: pointer to the HW structure
170 *
171 * Checks the nvm for an alternate MAC address. An alternate MAC address
 172 * can be set up by pre-boot software and must be treated like a permanent
173 * address and must override the actual permanent MAC address. If an
174 * alternate MAC address is found it is programmed into RAR0, replacing
175 * the permanent address that was installed into RAR0 by the Si on reset.
176 * This function will return SUCCESS unless it encounters an error while
177 * reading the EEPROM.
178 **/
179s32 e1000_check_alt_mac_addr_generic(struct e1000_hw *hw)
180{
181 u32 i;
182 s32 ret_val = 0;
183 u16 offset, nvm_alt_mac_addr_offset, nvm_data;
184 u8 alt_mac_addr[ETH_ALEN];
185
186 ret_val = e1000_read_nvm(hw, NVM_ALT_MAC_ADDR_PTR, 1,
187 &nvm_alt_mac_addr_offset);
188 if (ret_val) {
189 e_dbg("NVM Read Error\n");
190 goto out;
191 }
192
193 if (nvm_alt_mac_addr_offset == 0xFFFF) {
194 /* There is no Alternate MAC Address */
195 goto out;
196 }
197
198 if (hw->bus.func == E1000_FUNC_1)
199 nvm_alt_mac_addr_offset += E1000_ALT_MAC_ADDRESS_OFFSET_LAN1;
200 for (i = 0; i < ETH_ALEN; i += 2) {
201 offset = nvm_alt_mac_addr_offset + (i >> 1);
202 ret_val = e1000_read_nvm(hw, offset, 1, &nvm_data);
203 if (ret_val) {
204 e_dbg("NVM Read Error\n");
205 goto out;
206 }
207
208 alt_mac_addr[i] = (u8)(nvm_data & 0xFF);
209 alt_mac_addr[i + 1] = (u8)(nvm_data >> 8);
210 }
211
212 /* if multicast bit is set, the alternate address will not be used */
213 if (alt_mac_addr[0] & 0x01) {
214 e_dbg("Ignoring Alternate Mac Address with MC bit set\n");
215 goto out;
216 }
217
218 /*
219 * We have a valid alternate MAC address, and we want to treat it the
220 * same as the normal permanent MAC address stored by the HW into the
221 * RAR. Do this by mapping this address into RAR0.
222 */
223 e1000e_rar_set(hw, alt_mac_addr, 0);
224
225out:
226 return ret_val;
227}
228
229/**
142 * e1000e_rar_set - Set receive address register 230 * e1000e_rar_set - Set receive address register
143 * @hw: pointer to the HW structure 231 * @hw: pointer to the HW structure
144 * @addr: pointer to the receive address 232 * @addr: pointer to the receive address
@@ -252,62 +340,34 @@ static u32 e1000_hash_mc_addr(struct e1000_hw *hw, u8 *mc_addr)
252 * @hw: pointer to the HW structure 340 * @hw: pointer to the HW structure
253 * @mc_addr_list: array of multicast addresses to program 341 * @mc_addr_list: array of multicast addresses to program
254 * @mc_addr_count: number of multicast addresses to program 342 * @mc_addr_count: number of multicast addresses to program
255 * @rar_used_count: the first RAR register free to program
256 * @rar_count: total number of supported Receive Address Registers
257 * 343 *
258 * Updates the Receive Address Registers and Multicast Table Array. 344 * Updates entire Multicast Table Array.
259 * The caller must have a packed mc_addr_list of multicast addresses. 345 * The caller must have a packed mc_addr_list of multicast addresses.
260 * The parameter rar_count will usually be hw->mac.rar_entry_count
261 * unless there are workarounds that change this.
262 **/ 346 **/
263void e1000e_update_mc_addr_list_generic(struct e1000_hw *hw, 347void e1000e_update_mc_addr_list_generic(struct e1000_hw *hw,
264 u8 *mc_addr_list, u32 mc_addr_count, 348 u8 *mc_addr_list, u32 mc_addr_count)
265 u32 rar_used_count, u32 rar_count)
266{ 349{
267 u32 i; 350 u32 hash_value, hash_bit, hash_reg;
268 u32 *mcarray = kzalloc(hw->mac.mta_reg_count * sizeof(u32), GFP_ATOMIC); 351 int i;
269 352
270 if (!mcarray) { 353 /* clear mta_shadow */
271 printk(KERN_ERR "multicast array memory allocation failed\n"); 354 memset(&hw->mac.mta_shadow, 0, sizeof(hw->mac.mta_shadow));
272 return;
273 }
274 355
275 /* 356 /* update mta_shadow from mc_addr_list */
276 * Load the first set of multicast addresses into the exact 357 for (i = 0; (u32) i < mc_addr_count; i++) {
277 * filters (RAR). If there are not enough to fill the RAR
278 * array, clear the filters.
279 */
280 for (i = rar_used_count; i < rar_count; i++) {
281 if (mc_addr_count) {
282 e1000e_rar_set(hw, mc_addr_list, i);
283 mc_addr_count--;
284 mc_addr_list += ETH_ALEN;
285 } else {
286 E1000_WRITE_REG_ARRAY(hw, E1000_RA, i << 1, 0);
287 e1e_flush();
288 E1000_WRITE_REG_ARRAY(hw, E1000_RA, (i << 1) + 1, 0);
289 e1e_flush();
290 }
291 }
292
293 /* Load any remaining multicast addresses into the hash table. */
294 for (; mc_addr_count > 0; mc_addr_count--) {
295 u32 hash_value, hash_reg, hash_bit, mta;
296 hash_value = e1000_hash_mc_addr(hw, mc_addr_list); 358 hash_value = e1000_hash_mc_addr(hw, mc_addr_list);
297 e_dbg("Hash value = 0x%03X\n", hash_value); 359
298 hash_reg = (hash_value >> 5) & (hw->mac.mta_reg_count - 1); 360 hash_reg = (hash_value >> 5) & (hw->mac.mta_reg_count - 1);
299 hash_bit = hash_value & 0x1F; 361 hash_bit = hash_value & 0x1F;
300 mta = (1 << hash_bit);
301 mcarray[hash_reg] |= mta;
302 mc_addr_list += ETH_ALEN;
303 }
304 362
305 /* write the hash table completely */ 363 hw->mac.mta_shadow[hash_reg] |= (1 << hash_bit);
306 for (i = 0; i < hw->mac.mta_reg_count; i++) 364 mc_addr_list += (ETH_ALEN);
307 E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, mcarray[i]); 365 }
308 366
367 /* replace the entire MTA table */
368 for (i = hw->mac.mta_reg_count - 1; i >= 0; i--)
369 E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, hw->mac.mta_shadow[i]);
309 e1e_flush(); 370 e1e_flush();
310 kfree(mcarray);
311} 371}
312 372
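With the shadow table, every multicast address maps to a single bit of the 32-bit MTA registers: the upper hash bits select the register, the low five bits select the bit within it. A small sketch of that index math (the hash function itself is not shown in this hunk; mta_reg_count is assumed to be a power of two, commonly 128):

```c
#include <stdint.h>

/* Sketch of the MTA indexing used in e1000e_update_mc_addr_list_generic().
 * The caller would then do: mta_shadow[hash_reg] |= 1u << hash_bit; */
static void mta_locate(uint32_t hash_value, uint32_t mta_reg_count,
		       uint32_t *hash_reg, uint32_t *hash_bit)
{
	*hash_reg = (hash_value >> 5) & (mta_reg_count - 1);
	*hash_bit = hash_value & 0x1F;
}
```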
313/** 373/**
@@ -2072,67 +2132,27 @@ s32 e1000e_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
2072} 2132}
2073 2133
2074/** 2134/**
2075 * e1000e_read_mac_addr - Read device MAC address 2135 * e1000_read_mac_addr_generic - Read device MAC address
2076 * @hw: pointer to the HW structure 2136 * @hw: pointer to the HW structure
2077 * 2137 *
2078 * Reads the device MAC address from the EEPROM and stores the value. 2138 * Reads the device MAC address from the EEPROM and stores the value.
2079 * Since devices with two ports use the same EEPROM, we increment the 2139 * Since devices with two ports use the same EEPROM, we increment the
2080 * last bit in the MAC address for the second port. 2140 * last bit in the MAC address for the second port.
2081 **/ 2141 **/
2082s32 e1000e_read_mac_addr(struct e1000_hw *hw) 2142s32 e1000_read_mac_addr_generic(struct e1000_hw *hw)
2083{ 2143{
2084 s32 ret_val; 2144 u32 rar_high;
2085 u16 offset, nvm_data, i; 2145 u32 rar_low;
2086 u16 mac_addr_offset = 0; 2146 u16 i;
2087
2088 if (hw->mac.type == e1000_82571) {
2089 /* Check for an alternate MAC address. An alternate MAC
2090 * address can be setup by pre-boot software and must be
2091 * treated like a permanent address and must override the
2092 * actual permanent MAC address.*/
2093 ret_val = e1000_read_nvm(hw, NVM_ALT_MAC_ADDR_PTR, 1,
2094 &mac_addr_offset);
2095 if (ret_val) {
2096 e_dbg("NVM Read Error\n");
2097 return ret_val;
2098 }
2099 if (mac_addr_offset == 0xFFFF)
2100 mac_addr_offset = 0;
2101
2102 if (mac_addr_offset) {
2103 if (hw->bus.func == E1000_FUNC_1)
2104 mac_addr_offset += ETH_ALEN/sizeof(u16);
2105
2106 /* make sure we have a valid mac address here
2107 * before using it */
2108 ret_val = e1000_read_nvm(hw, mac_addr_offset, 1,
2109 &nvm_data);
2110 if (ret_val) {
2111 e_dbg("NVM Read Error\n");
2112 return ret_val;
2113 }
2114 if (nvm_data & 0x0001)
2115 mac_addr_offset = 0;
2116 }
2117 2147
2118 if (mac_addr_offset) 2148 rar_high = er32(RAH(0));
2119 hw->dev_spec.e82571.alt_mac_addr_is_present = 1; 2149 rar_low = er32(RAL(0));
2120 }
2121 2150
2122 for (i = 0; i < ETH_ALEN; i += 2) { 2151 for (i = 0; i < E1000_RAL_MAC_ADDR_LEN; i++)
2123 offset = mac_addr_offset + (i >> 1); 2152 hw->mac.perm_addr[i] = (u8)(rar_low >> (i*8));
2124 ret_val = e1000_read_nvm(hw, offset, 1, &nvm_data);
2125 if (ret_val) {
2126 e_dbg("NVM Read Error\n");
2127 return ret_val;
2128 }
2129 hw->mac.perm_addr[i] = (u8)(nvm_data & 0xFF);
2130 hw->mac.perm_addr[i+1] = (u8)(nvm_data >> 8);
2131 }
2132 2153
2133 /* Flip last bit of mac address if we're on second port */ 2154 for (i = 0; i < E1000_RAH_MAC_ADDR_LEN; i++)
2134 if (!mac_addr_offset && hw->bus.func == E1000_FUNC_1) 2155 hw->mac.perm_addr[i+4] = (u8)(rar_high >> (i*8));
2135 hw->mac.perm_addr[5] ^= 1;
2136 2156
2137 for (i = 0; i < ETH_ALEN; i++) 2157 for (i = 0; i < ETH_ALEN; i++)
2138 hw->mac.addr[i] = hw->mac.perm_addr[i]; 2158 hw->mac.addr[i] = hw->mac.perm_addr[i];
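The generic MAC-address read now recovers the address from the RAR0 register pair instead of re-reading the NVM. Assuming E1000_RAL_MAC_ADDR_LEN is 4 and E1000_RAH_MAC_ADDR_LEN is 2, the unpacking is equivalent to this sketch:

```c
#include <stdint.h>

/* Sketch: rebuild the 6-byte MAC from RAL(0)/RAH(0), matching the loops
 * in e1000_read_mac_addr_generic() above. */
static void rar0_to_mac(uint32_t rar_low, uint32_t rar_high, uint8_t mac[6])
{
	int i;

	for (i = 0; i < 4; i++)		/* bytes 0..3 come from RAL */
		mac[i] = (uint8_t)(rar_low >> (i * 8));
	for (i = 0; i < 2; i++)		/* bytes 4..5 come from RAH */
		mac[4 + i] = (uint8_t)(rar_high >> (i * 8));
}
```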
diff --git a/drivers/net/e1000e/netdev.c b/drivers/net/e1000e/netdev.c
index 57f149b75fb..14a80f8f611 100644
--- a/drivers/net/e1000e/netdev.c
+++ b/drivers/net/e1000e/netdev.c
@@ -2541,22 +2541,14 @@ static void e1000_configure_rx(struct e1000_adapter *adapter)
2541 * @hw: pointer to the HW structure 2541 * @hw: pointer to the HW structure
2542 * @mc_addr_list: array of multicast addresses to program 2542 * @mc_addr_list: array of multicast addresses to program
2543 * @mc_addr_count: number of multicast addresses to program 2543 * @mc_addr_count: number of multicast addresses to program
2544 * @rar_used_count: the first RAR register free to program
2545 * @rar_count: total number of supported Receive Address Registers
2546 * 2544 *
2547 * Updates the Receive Address Registers and Multicast Table Array. 2545 * Updates the Multicast Table Array.
2548 * The caller must have a packed mc_addr_list of multicast addresses. 2546 * The caller must have a packed mc_addr_list of multicast addresses.
2549 * The parameter rar_count will usually be hw->mac.rar_entry_count
2550 * unless there are workarounds that change this. Currently no func pointer
2551 * exists and all implementations are handled in the generic version of this
2552 * function.
2553 **/ 2547 **/
2554static void e1000_update_mc_addr_list(struct e1000_hw *hw, u8 *mc_addr_list, 2548static void e1000_update_mc_addr_list(struct e1000_hw *hw, u8 *mc_addr_list,
2555 u32 mc_addr_count, u32 rar_used_count, 2549 u32 mc_addr_count)
2556 u32 rar_count)
2557{ 2550{
2558 hw->mac.ops.update_mc_addr_list(hw, mc_addr_list, mc_addr_count, 2551 hw->mac.ops.update_mc_addr_list(hw, mc_addr_list, mc_addr_count);
2559 rar_used_count, rar_count);
2560} 2552}
2561 2553
2562/** 2554/**
@@ -2572,7 +2564,6 @@ static void e1000_set_multi(struct net_device *netdev)
2572{ 2564{
2573 struct e1000_adapter *adapter = netdev_priv(netdev); 2565 struct e1000_adapter *adapter = netdev_priv(netdev);
2574 struct e1000_hw *hw = &adapter->hw; 2566 struct e1000_hw *hw = &adapter->hw;
2575 struct e1000_mac_info *mac = &hw->mac;
2576 struct dev_mc_list *mc_ptr; 2567 struct dev_mc_list *mc_ptr;
2577 u8 *mta_list; 2568 u8 *mta_list;
2578 u32 rctl; 2569 u32 rctl;
@@ -2614,15 +2605,14 @@ static void e1000_set_multi(struct net_device *netdev)
2614 mc_ptr = mc_ptr->next; 2605 mc_ptr = mc_ptr->next;
2615 } 2606 }
2616 2607
2617 e1000_update_mc_addr_list(hw, mta_list, i, 1, 2608 e1000_update_mc_addr_list(hw, mta_list, i);
2618 mac->rar_entry_count);
2619 kfree(mta_list); 2609 kfree(mta_list);
2620 } else { 2610 } else {
2621 /* 2611 /*
2622 * if we're called from probe, we might not have 2612 * if we're called from probe, we might not have
2623 * anything to do here, so clear out the list 2613 * anything to do here, so clear out the list
2624 */ 2614 */
2625 e1000_update_mc_addr_list(hw, NULL, 0, 1, mac->rar_entry_count); 2615 e1000_update_mc_addr_list(hw, NULL, 0);
2626 } 2616 }
2627} 2617}
2628 2618
@@ -5134,7 +5124,7 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
5134 5124
5135 e1000_eeprom_checks(adapter); 5125 e1000_eeprom_checks(adapter);
5136 5126
5137 /* copy the MAC address out of the NVM */ 5127 /* copy the MAC address */
5138 if (e1000e_read_mac_addr(&adapter->hw)) 5128 if (e1000e_read_mac_addr(&adapter->hw))
5139 e_err("NVM Read Error while reading MAC address\n"); 5129 e_err("NVM Read Error while reading MAC address\n");
5140 5130
@@ -5326,7 +5316,7 @@ static struct pci_error_handlers e1000_err_handler = {
5326 .resume = e1000_io_resume, 5316 .resume = e1000_io_resume,
5327}; 5317};
5328 5318
5329static struct pci_device_id e1000_pci_tbl[] = { 5319static DEFINE_PCI_DEVICE_TABLE(e1000_pci_tbl) = {
5330 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_COPPER), board_82571 }, 5320 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_COPPER), board_82571 },
5331 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_FIBER), board_82571 }, 5321 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_FIBER), board_82571 },
5332 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_QUAD_COPPER), board_82571 }, 5322 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_QUAD_COPPER), board_82571 },
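Several drivers in this series convert their ID tables to DEFINE_PCI_DEVICE_TABLE. Treat the exact expansion as an assumption, but in kernels of this era the macro essentially only added const plus a section annotation, so the table becomes read-only instead of writable data:

```c
/* Assumed contemporary definition from <linux/pci.h>; the functional
 * change for the converted drivers is just the constness/placement of
 * the ID table. */
#define DEFINE_PCI_DEVICE_TABLE(_table) \
	const struct pci_device_id _table[] __devinitconst
```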
diff --git a/drivers/net/enic/enic.h b/drivers/net/enic/enic.h
index e1c2076228b..ee01f5a6d0d 100644
--- a/drivers/net/enic/enic.h
+++ b/drivers/net/enic/enic.h
@@ -34,7 +34,7 @@
34 34
35#define DRV_NAME "enic" 35#define DRV_NAME "enic"
36#define DRV_DESCRIPTION "Cisco 10G Ethernet Driver" 36#define DRV_DESCRIPTION "Cisco 10G Ethernet Driver"
37#define DRV_VERSION "1.1.0.100" 37#define DRV_VERSION "1.1.0.241a"
38#define DRV_COPYRIGHT "Copyright 2008-2009 Cisco Systems, Inc" 38#define DRV_COPYRIGHT "Copyright 2008-2009 Cisco Systems, Inc"
39#define PFX DRV_NAME ": " 39#define PFX DRV_NAME ": "
40 40
@@ -89,9 +89,12 @@ struct enic {
89 spinlock_t devcmd_lock; 89 spinlock_t devcmd_lock;
90 u8 mac_addr[ETH_ALEN]; 90 u8 mac_addr[ETH_ALEN];
91 u8 mc_addr[ENIC_MULTICAST_PERFECT_FILTERS][ETH_ALEN]; 91 u8 mc_addr[ENIC_MULTICAST_PERFECT_FILTERS][ETH_ALEN];
92 unsigned int flags;
92 unsigned int mc_count; 93 unsigned int mc_count;
93 int csum_rx_enabled; 94 int csum_rx_enabled;
94 u32 port_mtu; 95 u32 port_mtu;
96 u32 rx_coalesce_usecs;
97 u32 tx_coalesce_usecs;
95 98
96 /* work queue cache line section */ 99 /* work queue cache line section */
97 ____cacheline_aligned struct vnic_wq wq[ENIC_WQ_MAX]; 100 ____cacheline_aligned struct vnic_wq wq[ENIC_WQ_MAX];
diff --git a/drivers/net/enic/enic_main.c b/drivers/net/enic/enic_main.c
index f875751af15..c81bc4b1816 100644
--- a/drivers/net/enic/enic_main.c
+++ b/drivers/net/enic/enic_main.c
@@ -51,7 +51,7 @@
51#define PCI_DEVICE_ID_CISCO_VIC_ENET 0x0043 /* ethernet vnic */ 51#define PCI_DEVICE_ID_CISCO_VIC_ENET 0x0043 /* ethernet vnic */
52 52
53/* Supported devices */ 53/* Supported devices */
54static struct pci_device_id enic_id_table[] = { 54static DEFINE_PCI_DEVICE_TABLE(enic_id_table) = {
55 { PCI_VDEVICE(CISCO, PCI_DEVICE_ID_CISCO_VIC_ENET) }, 55 { PCI_VDEVICE(CISCO, PCI_DEVICE_ID_CISCO_VIC_ENET) },
56 { 0, } /* end of table */ 56 { 0, } /* end of table */
57}; 57};
@@ -261,6 +261,62 @@ static void enic_set_msglevel(struct net_device *netdev, u32 value)
261 enic->msg_enable = value; 261 enic->msg_enable = value;
262} 262}
263 263
264static int enic_get_coalesce(struct net_device *netdev,
265 struct ethtool_coalesce *ecmd)
266{
267 struct enic *enic = netdev_priv(netdev);
268
269 ecmd->tx_coalesce_usecs = enic->tx_coalesce_usecs;
270 ecmd->rx_coalesce_usecs = enic->rx_coalesce_usecs;
271
272 return 0;
273}
274
275static int enic_set_coalesce(struct net_device *netdev,
276 struct ethtool_coalesce *ecmd)
277{
278 struct enic *enic = netdev_priv(netdev);
279 u32 tx_coalesce_usecs;
280 u32 rx_coalesce_usecs;
281
282 tx_coalesce_usecs = min_t(u32,
283 INTR_COALESCE_HW_TO_USEC(VNIC_INTR_TIMER_MAX),
284 ecmd->tx_coalesce_usecs);
285 rx_coalesce_usecs = min_t(u32,
286 INTR_COALESCE_HW_TO_USEC(VNIC_INTR_TIMER_MAX),
287 ecmd->rx_coalesce_usecs);
288
289 switch (vnic_dev_get_intr_mode(enic->vdev)) {
290 case VNIC_DEV_INTR_MODE_INTX:
291 if (tx_coalesce_usecs != rx_coalesce_usecs)
292 return -EINVAL;
293
294 vnic_intr_coalescing_timer_set(&enic->intr[ENIC_INTX_WQ_RQ],
295 INTR_COALESCE_USEC_TO_HW(tx_coalesce_usecs));
296 break;
297 case VNIC_DEV_INTR_MODE_MSI:
298 if (tx_coalesce_usecs != rx_coalesce_usecs)
299 return -EINVAL;
300
301 vnic_intr_coalescing_timer_set(&enic->intr[0],
302 INTR_COALESCE_USEC_TO_HW(tx_coalesce_usecs));
303 break;
304 case VNIC_DEV_INTR_MODE_MSIX:
305 vnic_intr_coalescing_timer_set(&enic->intr[ENIC_MSIX_WQ],
306 INTR_COALESCE_USEC_TO_HW(tx_coalesce_usecs));
307 vnic_intr_coalescing_timer_set(&enic->intr[ENIC_MSIX_RQ],
308 INTR_COALESCE_USEC_TO_HW(rx_coalesce_usecs));
309 break;
310 default:
311 break;
312 }
313
314 enic->tx_coalesce_usecs = tx_coalesce_usecs;
315 enic->rx_coalesce_usecs = rx_coalesce_usecs;
316
317 return 0;
318}
319
264static const struct ethtool_ops enic_ethtool_ops = { 320static const struct ethtool_ops enic_ethtool_ops = {
265 .get_settings = enic_get_settings, 321 .get_settings = enic_get_settings,
266 .get_drvinfo = enic_get_drvinfo, 322 .get_drvinfo = enic_get_drvinfo,
@@ -278,6 +334,8 @@ static const struct ethtool_ops enic_ethtool_ops = {
278 .set_sg = ethtool_op_set_sg, 334 .set_sg = ethtool_op_set_sg,
279 .get_tso = ethtool_op_get_tso, 335 .get_tso = ethtool_op_get_tso,
280 .set_tso = enic_set_tso, 336 .set_tso = enic_set_tso,
337 .get_coalesce = enic_get_coalesce,
338 .set_coalesce = enic_set_coalesce,
281 .get_flags = ethtool_op_get_flags, 339 .get_flags = ethtool_op_get_flags,
282 .set_flags = ethtool_op_set_flags, 340 .set_flags = ethtool_op_set_flags,
283}; 341};
@@ -363,12 +421,12 @@ static void enic_mtu_check(struct enic *enic)
363 u32 mtu = vnic_dev_mtu(enic->vdev); 421 u32 mtu = vnic_dev_mtu(enic->vdev);
364 422
365 if (mtu && mtu != enic->port_mtu) { 423 if (mtu && mtu != enic->port_mtu) {
424 enic->port_mtu = mtu;
366 if (mtu < enic->netdev->mtu) 425 if (mtu < enic->netdev->mtu)
367 printk(KERN_WARNING PFX 426 printk(KERN_WARNING PFX
368 "%s: interface MTU (%d) set higher " 427 "%s: interface MTU (%d) set higher "
369 "than switch port MTU (%d)\n", 428 "than switch port MTU (%d)\n",
370 enic->netdev->name, enic->netdev->mtu, mtu); 429 enic->netdev->name, enic->netdev->mtu, mtu);
371 enic->port_mtu = mtu;
372 } 430 }
373} 431}
374 432
@@ -673,7 +731,7 @@ static inline void enic_queue_wq_skb(struct enic *enic,
673 731
674/* netif_tx_lock held, process context with BHs disabled, or BH */ 732/* netif_tx_lock held, process context with BHs disabled, or BH */
675static netdev_tx_t enic_hard_start_xmit(struct sk_buff *skb, 733static netdev_tx_t enic_hard_start_xmit(struct sk_buff *skb,
676 struct net_device *netdev) 734 struct net_device *netdev)
677{ 735{
678 struct enic *enic = netdev_priv(netdev); 736 struct enic *enic = netdev_priv(netdev);
679 struct vnic_wq *wq = &enic->wq[0]; 737 struct vnic_wq *wq = &enic->wq[0];
@@ -771,6 +829,7 @@ static void enic_set_multicast_list(struct net_device *netdev)
771 int promisc = (netdev->flags & IFF_PROMISC) ? 1 : 0; 829 int promisc = (netdev->flags & IFF_PROMISC) ? 1 : 0;
772 int allmulti = (netdev->flags & IFF_ALLMULTI) || 830 int allmulti = (netdev->flags & IFF_ALLMULTI) ||
773 (netdev->mc_count > ENIC_MULTICAST_PERFECT_FILTERS); 831 (netdev->mc_count > ENIC_MULTICAST_PERFECT_FILTERS);
832 unsigned int flags = netdev->flags | (allmulti ? IFF_ALLMULTI : 0);
774 u8 mc_addr[ENIC_MULTICAST_PERFECT_FILTERS][ETH_ALEN]; 833 u8 mc_addr[ENIC_MULTICAST_PERFECT_FILTERS][ETH_ALEN];
775 unsigned int mc_count = netdev->mc_count; 834 unsigned int mc_count = netdev->mc_count;
776 unsigned int i, j; 835 unsigned int i, j;
@@ -780,8 +839,11 @@ static void enic_set_multicast_list(struct net_device *netdev)
780 839
781 spin_lock(&enic->devcmd_lock); 840 spin_lock(&enic->devcmd_lock);
782 841
783 vnic_dev_packet_filter(enic->vdev, directed, 842 if (enic->flags != flags) {
784 multicast, broadcast, promisc, allmulti); 843 enic->flags = flags;
844 vnic_dev_packet_filter(enic->vdev, directed,
845 multicast, broadcast, promisc, allmulti);
846 }
785 847
786 /* Is there an easier way? Trying to minimize to 848 /* Is there an easier way? Trying to minimize to
787 * calls to add/del multicast addrs. We keep the 849 * calls to add/del multicast addrs. We keep the
@@ -1084,34 +1146,6 @@ static int enic_rq_service(struct vnic_dev *vdev, struct cq_desc *cq_desc,
1084 return 0; 1146 return 0;
1085} 1147}
1086 1148
1087static void enic_rq_drop_buf(struct vnic_rq *rq,
1088 struct cq_desc *cq_desc, struct vnic_rq_buf *buf,
1089 int skipped, void *opaque)
1090{
1091 struct enic *enic = vnic_dev_priv(rq->vdev);
1092 struct sk_buff *skb = buf->os_buf;
1093
1094 if (skipped)
1095 return;
1096
1097 pci_unmap_single(enic->pdev, buf->dma_addr,
1098 buf->len, PCI_DMA_FROMDEVICE);
1099
1100 dev_kfree_skb_any(skb);
1101}
1102
1103static int enic_rq_service_drop(struct vnic_dev *vdev, struct cq_desc *cq_desc,
1104 u8 type, u16 q_number, u16 completed_index, void *opaque)
1105{
1106 struct enic *enic = vnic_dev_priv(vdev);
1107
1108 vnic_rq_service(&enic->rq[q_number], cq_desc,
1109 completed_index, VNIC_RQ_RETURN_DESC,
1110 enic_rq_drop_buf, opaque);
1111
1112 return 0;
1113}
1114
1115static int enic_poll(struct napi_struct *napi, int budget) 1149static int enic_poll(struct napi_struct *napi, int budget)
1116{ 1150{
1117 struct enic *enic = container_of(napi, struct enic, napi); 1151 struct enic *enic = container_of(napi, struct enic, napi);
@@ -1119,6 +1153,7 @@ static int enic_poll(struct napi_struct *napi, int budget)
1119 unsigned int rq_work_to_do = budget; 1153 unsigned int rq_work_to_do = budget;
1120 unsigned int wq_work_to_do = -1; /* no limit */ 1154 unsigned int wq_work_to_do = -1; /* no limit */
1121 unsigned int work_done, rq_work_done, wq_work_done; 1155 unsigned int work_done, rq_work_done, wq_work_done;
1156 int err;
1122 1157
1123 /* Service RQ (first) and WQ 1158 /* Service RQ (first) and WQ
1124 */ 1159 */
@@ -1142,16 +1177,19 @@ static int enic_poll(struct napi_struct *napi, int budget)
1142 0 /* don't unmask intr */, 1177 0 /* don't unmask intr */,
1143 0 /* don't reset intr timer */); 1178 0 /* don't reset intr timer */);
1144 1179
1145 if (rq_work_done > 0) { 1180 err = vnic_rq_fill(&enic->rq[0], enic->rq_alloc_buf);
1146 1181
1147 /* Replenish RQ 1182 /* Buffer allocation failed. Stay in polling
1148 */ 1183 * mode so we can try to fill the ring again.
1184 */
1149 1185
1150 vnic_rq_fill(&enic->rq[0], enic->rq_alloc_buf); 1186 if (err)
1187 rq_work_done = rq_work_to_do;
1151 1188
1152 } else { 1189 if (rq_work_done < rq_work_to_do) {
1153 1190
1154 /* If no work done, flush all LROs and exit polling 1191 /* Some work done, but not enough to stay in polling,
1192 * flush all LROs and exit polling
1155 */ 1193 */
1156 1194
1157 if (netdev->features & NETIF_F_LRO) 1195 if (netdev->features & NETIF_F_LRO)
@@ -1170,6 +1208,7 @@ static int enic_poll_msix(struct napi_struct *napi, int budget)
1170 struct net_device *netdev = enic->netdev; 1208 struct net_device *netdev = enic->netdev;
1171 unsigned int work_to_do = budget; 1209 unsigned int work_to_do = budget;
1172 unsigned int work_done; 1210 unsigned int work_done;
1211 int err;
1173 1212
1174 /* Service RQ 1213 /* Service RQ
1175 */ 1214 */
@@ -1177,25 +1216,30 @@ static int enic_poll_msix(struct napi_struct *napi, int budget)
1177 work_done = vnic_cq_service(&enic->cq[ENIC_CQ_RQ], 1216 work_done = vnic_cq_service(&enic->cq[ENIC_CQ_RQ],
1178 work_to_do, enic_rq_service, NULL); 1217 work_to_do, enic_rq_service, NULL);
1179 1218
1180 if (work_done > 0) { 1219 /* Return intr event credits for this polling
1181 1220 * cycle. An intr event is the completion of a
1182 /* Replenish RQ 1221 * RQ packet.
1183 */ 1222 */
1184
1185 vnic_rq_fill(&enic->rq[0], enic->rq_alloc_buf);
1186
1187 /* Return intr event credits for this polling
1188 * cycle. An intr event is the completion of a
1189 * RQ packet.
1190 */
1191 1223
1224 if (work_done > 0)
1192 vnic_intr_return_credits(&enic->intr[ENIC_MSIX_RQ], 1225 vnic_intr_return_credits(&enic->intr[ENIC_MSIX_RQ],
1193 work_done, 1226 work_done,
1194 0 /* don't unmask intr */, 1227 0 /* don't unmask intr */,
1195 0 /* don't reset intr timer */); 1228 0 /* don't reset intr timer */);
1196 } else {
1197 1229
1198 /* If no work done, flush all LROs and exit polling 1230 err = vnic_rq_fill(&enic->rq[0], enic->rq_alloc_buf);
1231
1232 /* Buffer allocation failed. Stay in polling mode
1233 * so we can try to fill the ring again.
1234 */
1235
1236 if (err)
1237 work_done = work_to_do;
1238
1239 if (work_done < work_to_do) {
1240
1241 /* Some work done, but not enough to stay in polling,
1242 * flush all LROs and exit polling
1199 */ 1243 */
1200 1244
1201 if (netdev->features & NETIF_F_LRO) 1245 if (netdev->features & NETIF_F_LRO)
@@ -1304,6 +1348,24 @@ static int enic_request_intr(struct enic *enic)
1304 return err; 1348 return err;
1305} 1349}
1306 1350
1351static void enic_synchronize_irqs(struct enic *enic)
1352{
1353 unsigned int i;
1354
1355 switch (vnic_dev_get_intr_mode(enic->vdev)) {
1356 case VNIC_DEV_INTR_MODE_INTX:
1357 case VNIC_DEV_INTR_MODE_MSI:
1358 synchronize_irq(enic->pdev->irq);
1359 break;
1360 case VNIC_DEV_INTR_MODE_MSIX:
1361 for (i = 0; i < enic->intr_count; i++)
1362 synchronize_irq(enic->msix_entry[i].vector);
1363 break;
1364 default:
1365 break;
1366 }
1367}
1368
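enic_stop() (changed further down) now masks every interrupt source and calls this helper before disabling the queues; a later hunk also makes vnic_intr_mask() read the mask register back so the posted write is flushed first. A hedged sketch of the combined quiesce sequence, using a hypothetical wrapper name:

```c
/* Hypothetical wrapper (not in the patch) showing how the pieces fit:
 * mask all sources, let the read-back in vnic_intr_mask() flush the
 * posted writes, then wait for any handler still running elsewhere. */
static void enic_quiesce_irqs(struct enic *enic)
{
	unsigned int i;

	for (i = 0; i < enic->intr_count; i++)
		vnic_intr_mask(&enic->intr[i]);

	enic_synchronize_irqs(enic);
}
```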
1307static int enic_notify_set(struct enic *enic) 1369static int enic_notify_set(struct enic *enic)
1308{ 1370{
1309 int err; 1371 int err;
@@ -1360,11 +1422,13 @@ static int enic_open(struct net_device *netdev)
1360 } 1422 }
1361 1423
1362 for (i = 0; i < enic->rq_count; i++) { 1424 for (i = 0; i < enic->rq_count; i++) {
1363 err = vnic_rq_fill(&enic->rq[i], enic->rq_alloc_buf); 1425 vnic_rq_fill(&enic->rq[i], enic->rq_alloc_buf);
1364 if (err) { 1426 /* Need at least one buffer on ring to get going */
1427 if (vnic_rq_desc_used(&enic->rq[i]) == 0) {
1365 printk(KERN_ERR PFX 1428 printk(KERN_ERR PFX
1366 "%s: Unable to alloc receive buffers.\n", 1429 "%s: Unable to alloc receive buffers.\n",
1367 netdev->name); 1430 netdev->name);
1431 err = -ENOMEM;
1368 goto err_out_notify_unset; 1432 goto err_out_notify_unset;
1369 } 1433 }
1370 } 1434 }
@@ -1409,16 +1473,19 @@ static int enic_stop(struct net_device *netdev)
1409 unsigned int i; 1473 unsigned int i;
1410 int err; 1474 int err;
1411 1475
1476 for (i = 0; i < enic->intr_count; i++)
1477 vnic_intr_mask(&enic->intr[i]);
1478
1479 enic_synchronize_irqs(enic);
1480
1412 del_timer_sync(&enic->notify_timer); 1481 del_timer_sync(&enic->notify_timer);
1413 1482
1414 spin_lock(&enic->devcmd_lock); 1483 spin_lock(&enic->devcmd_lock);
1415 vnic_dev_disable(enic->vdev); 1484 vnic_dev_disable(enic->vdev);
1416 spin_unlock(&enic->devcmd_lock); 1485 spin_unlock(&enic->devcmd_lock);
1417 napi_disable(&enic->napi); 1486 napi_disable(&enic->napi);
1418 netif_stop_queue(netdev); 1487 netif_carrier_off(netdev);
1419 1488 netif_tx_disable(netdev);
1420 for (i = 0; i < enic->intr_count; i++)
1421 vnic_intr_mask(&enic->intr[i]);
1422 1489
1423 for (i = 0; i < enic->wq_count; i++) { 1490 for (i = 0; i < enic->wq_count; i++) {
1424 err = vnic_wq_disable(&enic->wq[i]); 1491 err = vnic_wq_disable(&enic->wq[i]);
@@ -1436,11 +1503,6 @@ static int enic_stop(struct net_device *netdev)
1436 spin_unlock(&enic->devcmd_lock); 1503 spin_unlock(&enic->devcmd_lock);
1437 enic_free_intr(enic); 1504 enic_free_intr(enic);
1438 1505
1439 (void)vnic_cq_service(&enic->cq[ENIC_CQ_RQ],
1440 -1, enic_rq_service_drop, NULL);
1441 (void)vnic_cq_service(&enic->cq[ENIC_CQ_WQ],
1442 -1, enic_wq_service, NULL);
1443
1444 for (i = 0; i < enic->wq_count; i++) 1506 for (i = 0; i < enic->wq_count; i++)
1445 vnic_wq_clean(&enic->wq[i], enic_free_wq_buf); 1507 vnic_wq_clean(&enic->wq[i], enic_free_wq_buf);
1446 for (i = 0; i < enic->rq_count; i++) 1508 for (i = 0; i < enic->rq_count; i++)
@@ -1762,7 +1824,8 @@ int enic_dev_init(struct enic *enic)
1762 err = enic_set_intr_mode(enic); 1824 err = enic_set_intr_mode(enic);
1763 if (err) { 1825 if (err) {
1764 printk(KERN_ERR PFX 1826 printk(KERN_ERR PFX
1765 "Failed to set intr mode, aborting.\n"); 1827 "Failed to set intr mode based on resource "
1828 "counts and system capabilities, aborting.\n");
1766 return err; 1829 return err;
1767 } 1830 }
1768 1831
@@ -1986,6 +2049,9 @@ static int __devinit enic_probe(struct pci_dev *pdev,
1986 goto err_out_dev_deinit; 2049 goto err_out_dev_deinit;
1987 } 2050 }
1988 2051
2052 enic->tx_coalesce_usecs = enic->config.intr_timer_usec;
2053 enic->rx_coalesce_usecs = enic->tx_coalesce_usecs;
2054
1989 netdev->netdev_ops = &enic_netdev_ops; 2055 netdev->netdev_ops = &enic_netdev_ops;
1990 netdev->watchdog_timeo = 2 * HZ; 2056 netdev->watchdog_timeo = 2 * HZ;
1991 netdev->ethtool_ops = &enic_ethtool_ops; 2057 netdev->ethtool_ops = &enic_ethtool_ops;
diff --git a/drivers/net/enic/enic_res.c b/drivers/net/enic/enic_res.c
index 32111144efc..02839bf0fe8 100644
--- a/drivers/net/enic/enic_res.c
+++ b/drivers/net/enic/enic_res.c
@@ -66,21 +66,21 @@ int enic_get_vnic_config(struct enic *enic)
66 GET_CONFIG(wq_desc_count); 66 GET_CONFIG(wq_desc_count);
67 GET_CONFIG(rq_desc_count); 67 GET_CONFIG(rq_desc_count);
68 GET_CONFIG(mtu); 68 GET_CONFIG(mtu);
69 GET_CONFIG(intr_timer);
70 GET_CONFIG(intr_timer_type); 69 GET_CONFIG(intr_timer_type);
71 GET_CONFIG(intr_mode); 70 GET_CONFIG(intr_mode);
71 GET_CONFIG(intr_timer_usec);
72 72
73 c->wq_desc_count = 73 c->wq_desc_count =
74 min_t(u32, ENIC_MAX_WQ_DESCS, 74 min_t(u32, ENIC_MAX_WQ_DESCS,
75 max_t(u32, ENIC_MIN_WQ_DESCS, 75 max_t(u32, ENIC_MIN_WQ_DESCS,
76 c->wq_desc_count)); 76 c->wq_desc_count));
77 c->wq_desc_count &= 0xfffffff0; /* must be aligned to groups of 16 */ 77 c->wq_desc_count &= 0xffffffe0; /* must be aligned to groups of 32 */
78 78
79 c->rq_desc_count = 79 c->rq_desc_count =
80 min_t(u32, ENIC_MAX_RQ_DESCS, 80 min_t(u32, ENIC_MAX_RQ_DESCS,
81 max_t(u32, ENIC_MIN_RQ_DESCS, 81 max_t(u32, ENIC_MIN_RQ_DESCS,
82 c->rq_desc_count)); 82 c->rq_desc_count));
83 c->rq_desc_count &= 0xfffffff0; /* must be aligned to groups of 16 */ 83 c->rq_desc_count &= 0xffffffe0; /* must be aligned to groups of 32 */
84 84
85 if (c->mtu == 0) 85 if (c->mtu == 0)
86 c->mtu = 1500; 86 c->mtu = 1500;
@@ -88,15 +88,17 @@ int enic_get_vnic_config(struct enic *enic)
88 max_t(u16, ENIC_MIN_MTU, 88 max_t(u16, ENIC_MIN_MTU,
89 c->mtu)); 89 c->mtu));
90 90
91 c->intr_timer = min_t(u16, VNIC_INTR_TIMER_MAX, c->intr_timer); 91 c->intr_timer_usec = min_t(u32,
92 INTR_COALESCE_HW_TO_USEC(VNIC_INTR_TIMER_MAX),
93 c->intr_timer_usec);
92 94
93 printk(KERN_INFO PFX "vNIC MAC addr %pM wq/rq %d/%d\n", 95 printk(KERN_INFO PFX "vNIC MAC addr %pM wq/rq %d/%d\n",
94 enic->mac_addr, c->wq_desc_count, c->rq_desc_count); 96 enic->mac_addr, c->wq_desc_count, c->rq_desc_count);
95 printk(KERN_INFO PFX "vNIC mtu %d csum tx/rx %d/%d tso/lro %d/%d " 97 printk(KERN_INFO PFX "vNIC mtu %d csum tx/rx %d/%d tso/lro %d/%d "
96 "intr timer %d\n", 98 "intr timer %d usec\n",
97 c->mtu, ENIC_SETTING(enic, TXCSUM), 99 c->mtu, ENIC_SETTING(enic, TXCSUM),
98 ENIC_SETTING(enic, RXCSUM), ENIC_SETTING(enic, TSO), 100 ENIC_SETTING(enic, RXCSUM), ENIC_SETTING(enic, TSO),
99 ENIC_SETTING(enic, LRO), c->intr_timer); 101 ENIC_SETTING(enic, LRO), c->intr_timer_usec);
100 102
101 return 0; 103 return 0;
102} 104}
@@ -303,7 +305,7 @@ void enic_init_vnic_resources(struct enic *enic)
303 305
304 for (i = 0; i < enic->intr_count; i++) { 306 for (i = 0; i < enic->intr_count; i++) {
305 vnic_intr_init(&enic->intr[i], 307 vnic_intr_init(&enic->intr[i],
306 enic->config.intr_timer, 308 INTR_COALESCE_USEC_TO_HW(enic->config.intr_timer_usec),
307 enic->config.intr_timer_type, 309 enic->config.intr_timer_type,
308 mask_on_assertion); 310 mask_on_assertion);
309 } 311 }
diff --git a/drivers/net/enic/vnic_dev.c b/drivers/net/enic/vnic_dev.c
index 29a48e8b59d..69b9b70c7da 100644
--- a/drivers/net/enic/vnic_dev.c
+++ b/drivers/net/enic/vnic_dev.c
@@ -36,7 +36,6 @@ struct vnic_res {
36}; 36};
37 37
38#define VNIC_DEV_CAP_INIT 0x0001 38#define VNIC_DEV_CAP_INIT 0x0001
39#define VNIC_DEV_CAP_PERBI 0x0002
40 39
41struct vnic_dev { 40struct vnic_dev {
42 void *priv; 41 void *priv;
diff --git a/drivers/net/enic/vnic_enet.h b/drivers/net/enic/vnic_enet.h
index 6332ac9391b..8eeb6758491 100644
--- a/drivers/net/enic/vnic_enet.h
+++ b/drivers/net/enic/vnic_enet.h
@@ -20,6 +20,10 @@
20#ifndef _VNIC_ENIC_H_ 20#ifndef _VNIC_ENIC_H_
21#define _VNIC_ENIC_H_ 21#define _VNIC_ENIC_H_
22 22
23/* Hardware intr coalesce timer is in units of 1.5us */
24#define INTR_COALESCE_USEC_TO_HW(usec) ((usec) * 2/3)
25#define INTR_COALESCE_HW_TO_USEC(usec) ((usec) * 3/2)
26
23/* Device-specific region: enet configuration */ 27/* Device-specific region: enet configuration */
24struct vnic_enet_config { 28struct vnic_enet_config {
25 u32 flags; 29 u32 flags;
@@ -30,6 +34,7 @@ struct vnic_enet_config {
30 u8 intr_timer_type; 34 u8 intr_timer_type;
31 u8 intr_mode; 35 u8 intr_mode;
32 char devname[16]; 36 char devname[16];
37 u32 intr_timer_usec;
33}; 38};
34 39
35#define VENETF_TSO 0x1 /* TSO enabled */ 40#define VENETF_TSO 0x1 /* TSO enabled */
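The hardware coalescing timer ticks in 1.5 us units, so the two conversion macros above are 2/3 and 3/2 scalings with integer truncation; a round trip can therefore lose a microsecond or so. A small worked sketch:

```c
#include <stdio.h>

/* Same definitions as in vnic_enet.h: one hardware tick is 1.5 us. */
#define INTR_COALESCE_USEC_TO_HW(usec) ((usec) * 2/3)
#define INTR_COALESCE_HW_TO_USEC(usec) ((usec) * 3/2)

int main(void)
{
	unsigned int usec = 100;
	unsigned int hw   = INTR_COALESCE_USEC_TO_HW(usec);  /* 100*2/3 = 66 */
	unsigned int back = INTR_COALESCE_HW_TO_USEC(hw);    /* 66*3/2  = 99 */

	printf("%u us -> %u hw ticks -> %u us\n", usec, hw, back);
	return 0;
}
```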
diff --git a/drivers/net/enic/vnic_intr.c b/drivers/net/enic/vnic_intr.c
index 1f8786d7195..3934309a949 100644
--- a/drivers/net/enic/vnic_intr.c
+++ b/drivers/net/enic/vnic_intr.c
@@ -50,12 +50,18 @@ int vnic_intr_alloc(struct vnic_dev *vdev, struct vnic_intr *intr,
50void vnic_intr_init(struct vnic_intr *intr, unsigned int coalescing_timer, 50void vnic_intr_init(struct vnic_intr *intr, unsigned int coalescing_timer,
51 unsigned int coalescing_type, unsigned int mask_on_assertion) 51 unsigned int coalescing_type, unsigned int mask_on_assertion)
52{ 52{
53 iowrite32(coalescing_timer, &intr->ctrl->coalescing_timer); 53 vnic_intr_coalescing_timer_set(intr, coalescing_timer);
54 iowrite32(coalescing_type, &intr->ctrl->coalescing_type); 54 iowrite32(coalescing_type, &intr->ctrl->coalescing_type);
55 iowrite32(mask_on_assertion, &intr->ctrl->mask_on_assertion); 55 iowrite32(mask_on_assertion, &intr->ctrl->mask_on_assertion);
56 iowrite32(0, &intr->ctrl->int_credits); 56 iowrite32(0, &intr->ctrl->int_credits);
57} 57}
58 58
59void vnic_intr_coalescing_timer_set(struct vnic_intr *intr,
60 unsigned int coalescing_timer)
61{
62 iowrite32(coalescing_timer, &intr->ctrl->coalescing_timer);
63}
64
59void vnic_intr_clean(struct vnic_intr *intr) 65void vnic_intr_clean(struct vnic_intr *intr)
60{ 66{
61 iowrite32(0, &intr->ctrl->int_credits); 67 iowrite32(0, &intr->ctrl->int_credits);
diff --git a/drivers/net/enic/vnic_intr.h b/drivers/net/enic/vnic_intr.h
index 9a53604edce..2fe6c6339e3 100644
--- a/drivers/net/enic/vnic_intr.h
+++ b/drivers/net/enic/vnic_intr.h
@@ -61,6 +61,7 @@ static inline void vnic_intr_unmask(struct vnic_intr *intr)
61static inline void vnic_intr_mask(struct vnic_intr *intr) 61static inline void vnic_intr_mask(struct vnic_intr *intr)
62{ 62{
63 iowrite32(1, &intr->ctrl->mask); 63 iowrite32(1, &intr->ctrl->mask);
64 (void)ioread32(&intr->ctrl->mask);
64} 65}
65 66
66static inline void vnic_intr_return_credits(struct vnic_intr *intr, 67static inline void vnic_intr_return_credits(struct vnic_intr *intr,
@@ -101,6 +102,8 @@ int vnic_intr_alloc(struct vnic_dev *vdev, struct vnic_intr *intr,
101 unsigned int index); 102 unsigned int index);
102void vnic_intr_init(struct vnic_intr *intr, unsigned int coalescing_timer, 103void vnic_intr_init(struct vnic_intr *intr, unsigned int coalescing_timer,
103 unsigned int coalescing_type, unsigned int mask_on_assertion); 104 unsigned int coalescing_type, unsigned int mask_on_assertion);
105void vnic_intr_coalescing_timer_set(struct vnic_intr *intr,
106 unsigned int coalescing_timer);
104void vnic_intr_clean(struct vnic_intr *intr); 107void vnic_intr_clean(struct vnic_intr *intr);
105 108
106#endif /* _VNIC_INTR_H_ */ 109#endif /* _VNIC_INTR_H_ */
diff --git a/drivers/net/enic/vnic_nic.h b/drivers/net/enic/vnic_nic.h
index eeaf329945d..cf80ab46d58 100644
--- a/drivers/net/enic/vnic_nic.h
+++ b/drivers/net/enic/vnic_nic.h
@@ -41,12 +41,12 @@
41#define NIC_CFG_IG_VLAN_STRIP_EN_MASK_FIELD 1UL 41#define NIC_CFG_IG_VLAN_STRIP_EN_MASK_FIELD 1UL
42#define NIC_CFG_IG_VLAN_STRIP_EN_SHIFT 24 42#define NIC_CFG_IG_VLAN_STRIP_EN_SHIFT 24
43 43
44#define NIC_CFG_RSS_HASH_TYPE_IPV4 (1 << 0) 44#define NIC_CFG_RSS_HASH_TYPE_IPV4 (1 << 1)
45#define NIC_CFG_RSS_HASH_TYPE_TCP_IPV4 (1 << 1) 45#define NIC_CFG_RSS_HASH_TYPE_TCP_IPV4 (1 << 2)
46#define NIC_CFG_RSS_HASH_TYPE_IPV6 (1 << 2) 46#define NIC_CFG_RSS_HASH_TYPE_IPV6 (1 << 3)
47#define NIC_CFG_RSS_HASH_TYPE_TCP_IPV6 (1 << 3) 47#define NIC_CFG_RSS_HASH_TYPE_TCP_IPV6 (1 << 4)
48#define NIC_CFG_RSS_HASH_TYPE_IPV6_EX (1 << 4) 48#define NIC_CFG_RSS_HASH_TYPE_IPV6_EX (1 << 5)
49#define NIC_CFG_RSS_HASH_TYPE_TCP_IPV6_EX (1 << 5) 49#define NIC_CFG_RSS_HASH_TYPE_TCP_IPV6_EX (1 << 6)
50 50
51static inline void vnic_set_nic_cfg(u32 *nic_cfg, 51static inline void vnic_set_nic_cfg(u32 *nic_cfg,
52 u8 rss_default_cpu, u8 rss_hash_type, 52 u8 rss_default_cpu, u8 rss_hash_type,
diff --git a/drivers/net/epic100.c b/drivers/net/epic100.c
index 41494f7b2ec..1f8b11449fa 100644
--- a/drivers/net/epic100.c
+++ b/drivers/net/epic100.c
@@ -167,7 +167,7 @@ static const struct epic_chip_info pci_id_tbl[] = {
167}; 167};
168 168
169 169
170static struct pci_device_id epic_pci_tbl[] = { 170static DEFINE_PCI_DEVICE_TABLE(epic_pci_tbl) = {
171 { 0x10B8, 0x0005, 0x1092, 0x0AB4, 0, 0, SMSC_83C170_0 }, 171 { 0x10B8, 0x0005, 0x1092, 0x0AB4, 0, 0, SMSC_83C170_0 },
172 { 0x10B8, 0x0005, PCI_ANY_ID, PCI_ANY_ID, 0, 0, SMSC_83C170 }, 172 { 0x10B8, 0x0005, PCI_ANY_ID, PCI_ANY_ID, 0, 0, SMSC_83C170 },
173 { 0x10B8, 0x0006, PCI_ANY_ID, PCI_ANY_ID, 173 { 0x10B8, 0x0006, PCI_ANY_ID, PCI_ANY_ID,
diff --git a/drivers/net/ethoc.c b/drivers/net/ethoc.c
index bd1db92aec1..f9d5ca07874 100644
--- a/drivers/net/ethoc.c
+++ b/drivers/net/ethoc.c
@@ -904,7 +904,7 @@ static int ethoc_probe(struct platform_device *pdev)
904 } 904 }
905 905
906 mmio = devm_request_mem_region(&pdev->dev, res->start, 906 mmio = devm_request_mem_region(&pdev->dev, res->start,
907 res->end - res->start + 1, res->name); 907 resource_size(res), res->name);
908 if (!mmio) { 908 if (!mmio) {
909 dev_err(&pdev->dev, "cannot request I/O memory space\n"); 909 dev_err(&pdev->dev, "cannot request I/O memory space\n");
910 ret = -ENXIO; 910 ret = -ENXIO;
@@ -917,7 +917,7 @@ static int ethoc_probe(struct platform_device *pdev)
917 res = platform_get_resource(pdev, IORESOURCE_MEM, 1); 917 res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
918 if (res) { 918 if (res) {
919 mem = devm_request_mem_region(&pdev->dev, res->start, 919 mem = devm_request_mem_region(&pdev->dev, res->start,
920 res->end - res->start + 1, res->name); 920 resource_size(res), res->name);
921 if (!mem) { 921 if (!mem) {
922 dev_err(&pdev->dev, "cannot request memory space\n"); 922 dev_err(&pdev->dev, "cannot request memory space\n");
923 ret = -ENXIO; 923 ret = -ENXIO;
@@ -945,7 +945,7 @@ static int ethoc_probe(struct platform_device *pdev)
945 priv->dma_alloc = 0; 945 priv->dma_alloc = 0;
946 946
947 priv->iobase = devm_ioremap_nocache(&pdev->dev, netdev->base_addr, 947 priv->iobase = devm_ioremap_nocache(&pdev->dev, netdev->base_addr,
948 mmio->end - mmio->start + 1); 948 resource_size(mmio));
949 if (!priv->iobase) { 949 if (!priv->iobase) {
950 dev_err(&pdev->dev, "cannot remap I/O memory space\n"); 950 dev_err(&pdev->dev, "cannot remap I/O memory space\n");
951 ret = -ENXIO; 951 ret = -ENXIO;
@@ -954,7 +954,7 @@ static int ethoc_probe(struct platform_device *pdev)
954 954
955 if (netdev->mem_end) { 955 if (netdev->mem_end) {
956 priv->membase = devm_ioremap_nocache(&pdev->dev, 956 priv->membase = devm_ioremap_nocache(&pdev->dev,
957 netdev->mem_start, mem->end - mem->start + 1); 957 netdev->mem_start, resource_size(mem));
958 if (!priv->membase) { 958 if (!priv->membase) {
959 dev_err(&pdev->dev, "cannot remap memory space\n"); 959 dev_err(&pdev->dev, "cannot remap memory space\n");
960 ret = -ENXIO; 960 ret = -ENXIO;
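The open-coded `res->end - res->start + 1` is replaced by the standard helper; resource_size() computes exactly that inclusive length. For reference, its shape in <linux/ioport.h> is roughly the following (quoted from memory, so treat it as an assumption):

```c
/* struct resource describes an inclusive [start, end] range, so the
 * byte count is end - start + 1. */
static inline resource_size_t resource_size(const struct resource *res)
{
	return res->end - res->start + 1;
}
```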
diff --git a/drivers/net/fealnx.c b/drivers/net/fealnx.c
index dac4e595589..e6a98129d78 100644
--- a/drivers/net/fealnx.c
+++ b/drivers/net/fealnx.c
@@ -1941,7 +1941,7 @@ static int netdev_close(struct net_device *dev)
1941 return 0; 1941 return 0;
1942} 1942}
1943 1943
1944static struct pci_device_id fealnx_pci_tbl[] = { 1944static DEFINE_PCI_DEVICE_TABLE(fealnx_pci_tbl) = {
1945 {0x1516, 0x0800, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, 1945 {0x1516, 0x0800, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
1946 {0x1516, 0x0803, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1}, 1946 {0x1516, 0x0803, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1},
1947 {0x1516, 0x0891, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 2}, 1947 {0x1516, 0x0891, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 2},
diff --git a/drivers/net/forcedeth.c b/drivers/net/forcedeth.c
index 3c340489804..3eb713b014f 100644
--- a/drivers/net/forcedeth.c
+++ b/drivers/net/forcedeth.c
@@ -6198,7 +6198,7 @@ static void nv_shutdown(struct pci_dev *pdev)
6198#define nv_resume NULL 6198#define nv_resume NULL
6199#endif /* CONFIG_PM */ 6199#endif /* CONFIG_PM */
6200 6200
6201static struct pci_device_id pci_tbl[] = { 6201static DEFINE_PCI_DEVICE_TABLE(pci_tbl) = {
6202 { /* nForce Ethernet Controller */ 6202 { /* nForce Ethernet Controller */
6203 PCI_DEVICE(0x10DE, 0x01C3), 6203 PCI_DEVICE(0x10DE, 0x01C3),
6204 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER, 6204 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER,
diff --git a/drivers/net/hamachi.c b/drivers/net/hamachi.c
index ea85075a89a..dd72c5025e6 100644
--- a/drivers/net/hamachi.c
+++ b/drivers/net/hamachi.c
@@ -1990,7 +1990,7 @@ static void __devexit hamachi_remove_one (struct pci_dev *pdev)
1990 } 1990 }
1991} 1991}
1992 1992
1993static struct pci_device_id hamachi_pci_tbl[] = { 1993static DEFINE_PCI_DEVICE_TABLE(hamachi_pci_tbl) = {
1994 { 0x1318, 0x0911, PCI_ANY_ID, PCI_ANY_ID, }, 1994 { 0x1318, 0x0911, PCI_ANY_ID, PCI_ANY_ID, },
1995 { 0, } 1995 { 0, }
1996}; 1996};
diff --git a/drivers/net/hp100.c b/drivers/net/hp100.c
index 90f890e7c5e..0c2f2e8b1c4 100644
--- a/drivers/net/hp100.c
+++ b/drivers/net/hp100.c
@@ -210,7 +210,7 @@ MODULE_DEVICE_TABLE(eisa, hp100_eisa_tbl);
210#endif 210#endif
211 211
212#ifdef CONFIG_PCI 212#ifdef CONFIG_PCI
213static struct pci_device_id hp100_pci_tbl[] = { 213static DEFINE_PCI_DEVICE_TABLE(hp100_pci_tbl) = {
214 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_J2585A, PCI_ANY_ID, PCI_ANY_ID,}, 214 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_J2585A, PCI_ANY_ID, PCI_ANY_ID,},
215 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_J2585B, PCI_ANY_ID, PCI_ANY_ID,}, 215 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_J2585B, PCI_ANY_ID, PCI_ANY_ID,},
216 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_J2970A, PCI_ANY_ID, PCI_ANY_ID,}, 216 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_J2970A, PCI_ANY_ID, PCI_ANY_ID,},
diff --git a/drivers/net/igb/igb_main.c b/drivers/net/igb/igb_main.c
index 997124d2992..0a064ce3eb4 100644
--- a/drivers/net/igb/igb_main.c
+++ b/drivers/net/igb/igb_main.c
@@ -60,7 +60,7 @@ static const struct e1000_info *igb_info_tbl[] = {
60 [board_82575] = &e1000_82575_info, 60 [board_82575] = &e1000_82575_info,
61}; 61};
62 62
63static struct pci_device_id igb_pci_tbl[] = { 63static DEFINE_PCI_DEVICE_TABLE(igb_pci_tbl) = {
64 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_COPPER), board_82575 }, 64 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_COPPER), board_82575 },
65 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_FIBER), board_82575 }, 65 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_FIBER), board_82575 },
66 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_SERDES), board_82575 }, 66 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_SERDES), board_82575 },
@@ -2905,12 +2905,13 @@ static int igb_write_uc_addr_list(struct net_device *netdev)
2905 int count = 0; 2905 int count = 0;
2906 2906
2907 /* return ENOMEM indicating insufficient memory for addresses */ 2907 /* return ENOMEM indicating insufficient memory for addresses */
2908 if (netdev->uc.count > rar_entries) 2908 if (netdev_uc_count(netdev) > rar_entries)
2909 return -ENOMEM; 2909 return -ENOMEM;
2910 2910
2911 if (netdev->uc.count && rar_entries) { 2911 if (!netdev_uc_empty(netdev) && rar_entries) {
2912 struct netdev_hw_addr *ha; 2912 struct netdev_hw_addr *ha;
2913 list_for_each_entry(ha, &netdev->uc.list, list) { 2913
2914 netdev_for_each_uc_addr(ha, netdev) {
2914 if (!rar_entries) 2915 if (!rar_entries)
2915 break; 2916 break;
2916 igb_rar_set_qsel(adapter, ha->addr, 2917 igb_rar_set_qsel(adapter, ha->addr,
@@ -4105,6 +4106,9 @@ static irqreturn_t igb_msix_other(int irq, void *data)
4105 u32 icr = rd32(E1000_ICR); 4106 u32 icr = rd32(E1000_ICR);
4106 /* reading ICR causes bit 31 of EICR to be cleared */ 4107 /* reading ICR causes bit 31 of EICR to be cleared */
4107 4108
4109 if (icr & E1000_ICR_DRSTA)
4110 schedule_work(&adapter->reset_task);
4111
4108 if (icr & E1000_ICR_DOUTSYNC) { 4112 if (icr & E1000_ICR_DOUTSYNC) {
4109 /* HW is reporting DMA is out of sync */ 4113 /* HW is reporting DMA is out of sync */
4110 adapter->stats.doosync++; 4114 adapter->stats.doosync++;
@@ -4728,6 +4732,9 @@ static irqreturn_t igb_intr_msi(int irq, void *data)
4728 4732
4729 igb_write_itr(q_vector); 4733 igb_write_itr(q_vector);
4730 4734
4735 if (icr & E1000_ICR_DRSTA)
4736 schedule_work(&adapter->reset_task);
4737
4731 if (icr & E1000_ICR_DOUTSYNC) { 4738 if (icr & E1000_ICR_DOUTSYNC) {
4732 /* HW is reporting DMA is out of sync */ 4739 /* HW is reporting DMA is out of sync */
4733 adapter->stats.doosync++; 4740 adapter->stats.doosync++;
@@ -4767,6 +4774,9 @@ static irqreturn_t igb_intr(int irq, void *data)
4767 if (!(icr & E1000_ICR_INT_ASSERTED)) 4774 if (!(icr & E1000_ICR_INT_ASSERTED))
4768 return IRQ_NONE; 4775 return IRQ_NONE;
4769 4776
4777 if (icr & E1000_ICR_DRSTA)
4778 schedule_work(&adapter->reset_task);
4779
4770 if (icr & E1000_ICR_DOUTSYNC) { 4780 if (icr & E1000_ICR_DOUTSYNC) {
4771 /* HW is reporting DMA is out of sync */ 4781 /* HW is reporting DMA is out of sync */
4772 adapter->stats.doosync++; 4782 adapter->stats.doosync++;
diff --git a/drivers/net/igbvf/netdev.c b/drivers/net/igbvf/netdev.c
index 2aa71a766c3..23ce07d3de0 100644
--- a/drivers/net/igbvf/netdev.c
+++ b/drivers/net/igbvf/netdev.c
@@ -2609,11 +2609,7 @@ static void igbvf_print_device_info(struct igbvf_adapter *adapter)
2609 struct pci_dev *pdev = adapter->pdev; 2609 struct pci_dev *pdev = adapter->pdev;
2610 2610
2611 dev_info(&pdev->dev, "Intel(R) 82576 Virtual Function\n"); 2611 dev_info(&pdev->dev, "Intel(R) 82576 Virtual Function\n");
2612 dev_info(&pdev->dev, "Address: %02x:%02x:%02x:%02x:%02x:%02x\n", 2612 dev_info(&pdev->dev, "Address: %pM\n", netdev->dev_addr);
2613 /* MAC address */
2614 netdev->dev_addr[0], netdev->dev_addr[1],
2615 netdev->dev_addr[2], netdev->dev_addr[3],
2616 netdev->dev_addr[4], netdev->dev_addr[5]);
2617 dev_info(&pdev->dev, "MAC: %d\n", hw->mac.type); 2613 dev_info(&pdev->dev, "MAC: %d\n", hw->mac.type);
2618} 2614}
2619 2615
@@ -2779,11 +2775,8 @@ static int __devinit igbvf_probe(struct pci_dev *pdev,
2779 memcpy(netdev->perm_addr, adapter->hw.mac.addr, netdev->addr_len); 2775 memcpy(netdev->perm_addr, adapter->hw.mac.addr, netdev->addr_len);
2780 2776
2781 if (!is_valid_ether_addr(netdev->perm_addr)) { 2777 if (!is_valid_ether_addr(netdev->perm_addr)) {
2782 dev_err(&pdev->dev, "Invalid MAC Address: " 2778 dev_err(&pdev->dev, "Invalid MAC Address: %pM\n",
2783 "%02x:%02x:%02x:%02x:%02x:%02x\n", 2779 netdev->dev_addr);
2784 netdev->dev_addr[0], netdev->dev_addr[1],
2785 netdev->dev_addr[2], netdev->dev_addr[3],
2786 netdev->dev_addr[4], netdev->dev_addr[5]);
2787 err = -EIO; 2780 err = -EIO;
2788 goto err_hw_init; 2781 goto err_hw_init;
2789 } 2782 }
@@ -2885,7 +2878,7 @@ static struct pci_error_handlers igbvf_err_handler = {
2885 .resume = igbvf_io_resume, 2878 .resume = igbvf_io_resume,
2886}; 2879};
2887 2880
2888static struct pci_device_id igbvf_pci_tbl[] = { 2881static DEFINE_PCI_DEVICE_TABLE(igbvf_pci_tbl) = {
2889 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_VF), board_vf }, 2882 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_VF), board_vf },
2890 { } /* terminate list */ 2883 { } /* terminate list */
2891}; 2884};
diff --git a/drivers/net/ioc3-eth.c b/drivers/net/ioc3-eth.c
index 8ec15ab8c8c..81a4c5d3073 100644
--- a/drivers/net/ioc3-eth.c
+++ b/drivers/net/ioc3-eth.c
@@ -1383,7 +1383,7 @@ static void __devexit ioc3_remove_one (struct pci_dev *pdev)
1383 */ 1383 */
1384} 1384}
1385 1385
1386static struct pci_device_id ioc3_pci_tbl[] = { 1386static DEFINE_PCI_DEVICE_TABLE(ioc3_pci_tbl) = {
1387 { PCI_VENDOR_ID_SGI, PCI_DEVICE_ID_SGI_IOC3, PCI_ANY_ID, PCI_ANY_ID }, 1387 { PCI_VENDOR_ID_SGI, PCI_DEVICE_ID_SGI_IOC3, PCI_ANY_ID, PCI_ANY_ID },
1388 { 0 } 1388 { 0 }
1389}; 1389};
diff --git a/drivers/net/ipg.c b/drivers/net/ipg.c
index ba8d246d05a..49f35e2ed19 100644
--- a/drivers/net/ipg.c
+++ b/drivers/net/ipg.c
@@ -92,7 +92,7 @@ static const char *ipg_brand_name[] = {
92 "D-Link NIC IP1000A" 92 "D-Link NIC IP1000A"
93}; 93};
94 94
95static struct pci_device_id ipg_pci_tbl[] __devinitdata = { 95static DEFINE_PCI_DEVICE_TABLE(ipg_pci_tbl) = {
96 { PCI_VDEVICE(SUNDANCE, 0x1023), 0 }, 96 { PCI_VDEVICE(SUNDANCE, 0x1023), 0 },
97 { PCI_VDEVICE(SUNDANCE, 0x2021), 1 }, 97 { PCI_VDEVICE(SUNDANCE, 0x2021), 1 },
98 { PCI_VDEVICE(SUNDANCE, 0x1021), 2 }, 98 { PCI_VDEVICE(SUNDANCE, 0x1021), 2 },
diff --git a/drivers/net/irda/Kconfig b/drivers/net/irda/Kconfig
index f7638422142..af10e97345c 100644
--- a/drivers/net/irda/Kconfig
+++ b/drivers/net/irda/Kconfig
@@ -64,6 +64,16 @@ endchoice
64 64
65comment "Dongle support" 65comment "Dongle support"
66 66
67config SH_SIR
68 tristate "SuperH SIR on UART"
69 depends on IRDA && SUPERH && \
70 (CPU_SUBTYPE_SH7722 || CPU_SUBTYPE_SH7723 || \
71 CPU_SUBTYPE_SH7724)
72 default n
73 help
74 Say Y here if you want to enable the SIR function on SuperH UART
75 devices.
76
67config DONGLE 77config DONGLE
68 bool "Serial dongle support" 78 bool "Serial dongle support"
69 depends on IRTTY_SIR 79 depends on IRTTY_SIR
diff --git a/drivers/net/irda/Makefile b/drivers/net/irda/Makefile
index d82e1e3bd8c..e030d47e279 100644
--- a/drivers/net/irda/Makefile
+++ b/drivers/net/irda/Makefile
@@ -22,6 +22,7 @@ obj-$(CONFIG_AU1000_FIR) += au1k_ir.o
22# SIR drivers 22# SIR drivers
23obj-$(CONFIG_IRTTY_SIR) += irtty-sir.o sir-dev.o 23obj-$(CONFIG_IRTTY_SIR) += irtty-sir.o sir-dev.o
24obj-$(CONFIG_BFIN_SIR) += bfin_sir.o 24obj-$(CONFIG_BFIN_SIR) += bfin_sir.o
25obj-$(CONFIG_SH_SIR) += sh_sir.o
25# dongle drivers for SIR drivers 26# dongle drivers for SIR drivers
26obj-$(CONFIG_ESI_DONGLE) += esi-sir.o 27obj-$(CONFIG_ESI_DONGLE) += esi-sir.o
27obj-$(CONFIG_TEKRAM_DONGLE) += tekram-sir.o 28obj-$(CONFIG_TEKRAM_DONGLE) += tekram-sir.o
diff --git a/drivers/net/irda/donauboe.c b/drivers/net/irda/donauboe.c
index 2d7b5c1d557..b7e6625ca75 100644
--- a/drivers/net/irda/donauboe.c
+++ b/drivers/net/irda/donauboe.c
@@ -184,7 +184,7 @@
184#define CONFIG0H_DMA_ON_NORX CONFIG0H_DMA_OFF| OBOE_CONFIG0H_ENDMAC 184#define CONFIG0H_DMA_ON_NORX CONFIG0H_DMA_OFF| OBOE_CONFIG0H_ENDMAC
185#define CONFIG0H_DMA_ON CONFIG0H_DMA_ON_NORX | OBOE_CONFIG0H_ENRX 185#define CONFIG0H_DMA_ON CONFIG0H_DMA_ON_NORX | OBOE_CONFIG0H_ENRX
186 186
187static struct pci_device_id toshoboe_pci_tbl[] = { 187static DEFINE_PCI_DEVICE_TABLE(toshoboe_pci_tbl) = {
188 { PCI_VENDOR_ID_TOSHIBA, PCI_DEVICE_ID_FIR701, PCI_ANY_ID, PCI_ANY_ID, }, 188 { PCI_VENDOR_ID_TOSHIBA, PCI_DEVICE_ID_FIR701, PCI_ANY_ID, PCI_ANY_ID, },
189 { PCI_VENDOR_ID_TOSHIBA, PCI_DEVICE_ID_FIRD01, PCI_ANY_ID, PCI_ANY_ID, }, 189 { PCI_VENDOR_ID_TOSHIBA, PCI_DEVICE_ID_FIRD01, PCI_ANY_ID, PCI_ANY_ID, },
190 { } /* Terminating entry */ 190 { } /* Terminating entry */
diff --git a/drivers/net/irda/sh_sir.c b/drivers/net/irda/sh_sir.c
new file mode 100644
index 00000000000..d7c983dc91a
--- /dev/null
+++ b/drivers/net/irda/sh_sir.c
@@ -0,0 +1,823 @@
1/*
2 * SuperH IrDA Driver
3 *
4 * Copyright (C) 2009 Renesas Solutions Corp.
5 * Kuninori Morimoto <morimoto.kuninori@renesas.com>
6 *
7 * Based on bfin_sir.c
8 * Copyright 2006-2009 Analog Devices Inc.
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License version 2 as
12 * published by the Free Software Foundation.
13 */
14
15#include <linux/module.h>
16#include <linux/platform_device.h>
17#include <net/irda/wrapper.h>
18#include <net/irda/irda_device.h>
19#include <asm/clock.h>
20
21#define DRIVER_NAME "sh_sir"
22
23#define RX_PHASE (1 << 0)
24#define TX_PHASE (1 << 1)
25#define TX_COMP_PHASE (1 << 2) /* tx complete */
26#define NONE_PHASE (1 << 31)
27
28#define IRIF_RINTCLR 0x0016 /* DMA rx interrupt source clear */
29#define IRIF_TINTCLR 0x0018 /* DMA tx interrupt source clear */
30#define IRIF_SIR0 0x0020 /* IrDA-SIR10 control */
31#define IRIF_SIR1 0x0022 /* IrDA-SIR10 baudrate error correction */
32#define IRIF_SIR2 0x0024 /* IrDA-SIR10 baudrate count */
33#define IRIF_SIR3 0x0026 /* IrDA-SIR10 status */
34#define IRIF_SIR_FRM 0x0028 /* Hardware frame processing set */
35#define IRIF_SIR_EOF 0x002A /* EOF value */
36#define IRIF_SIR_FLG 0x002C /* Flag clear */
37#define IRIF_UART_STS2 0x002E /* UART status 2 */
38#define IRIF_UART0 0x0030 /* UART control */
39#define IRIF_UART1 0x0032 /* UART status */
40#define IRIF_UART2 0x0034 /* UART mode */
41#define IRIF_UART3 0x0036 /* UART transmit data */
42#define IRIF_UART4 0x0038 /* UART receive data */
43#define IRIF_UART5 0x003A /* UART interrupt mask */
44#define IRIF_UART6 0x003C /* UART baud rate error correction */
45#define IRIF_UART7 0x003E /* UART baud rate count set */
46#define IRIF_CRC0 0x0040 /* CRC engine control */
47#define IRIF_CRC1 0x0042 /* CRC engine input data */
48#define IRIF_CRC2 0x0044 /* CRC engine calculation */
49#define IRIF_CRC3 0x0046 /* CRC engine output data 1 */
50#define IRIF_CRC4 0x0048 /* CRC engine output data 2 */
51
52/* IRIF_SIR0 */
53#define IRTPW (1 << 1) /* transmit pulse width select */
54#define IRERRC (1 << 0) /* Clear receive pulse width error */
55
56/* IRIF_SIR3 */
57#define IRERR (1 << 0) /* received pulse width Error */
58
59/* IRIF_SIR_FRM */
60#define EOFD (1 << 9) /* EOF detection flag */
61#define FRER (1 << 8) /* Frame Error bit */
62#define FRP (1 << 0) /* Frame processing set */
63
64/* IRIF_UART_STS2 */
65#define IRSME (1 << 6) /* Receive Sum Error flag */
66#define IROVE (1 << 5) /* Receive Overrun Error flag */
67#define IRFRE (1 << 4) /* Receive Framing Error flag */
68#define IRPRE (1 << 3) /* Receive Parity Error flag */
69
70/* IRIF_UART0 */
71#define TBEC (1 << 2) /* Transmit Data Clear */
72#define RIE (1 << 1) /* Receive Enable */
73#define TIE (1 << 0) /* Transmit Enable */
74
75/* IRIF_UART1 */
76#define URSME (1 << 6) /* Receive Sum Error Flag */
77#define UROVE (1 << 5) /* Receive Overrun Error Flag */
78#define URFRE (1 << 4) /* Receive Framing Error Flag */
79#define URPRE (1 << 3) /* Receive Parity Error Flag */
80#define RBF (1 << 2) /* Receive Buffer Full Flag */
81#define TSBE (1 << 1) /* Transmit Shift Buffer Empty Flag */
82#define TBE (1 << 0) /* Transmit Buffer Empty flag */
83#define TBCOMP (TSBE | TBE)
84
85/* IRIF_UART5 */
86#define RSEIM (1 << 6) /* Receive Sum Error Flag IRQ Mask */
87#define RBFIM (1 << 2) /* Receive Buffer Full Flag IRQ Mask */
88#define TSBEIM (1 << 1) /* Transmit Shift Buffer Empty Flag IRQ Mask */
89#define TBEIM (1 << 0) /* Transmit Buffer Empty Flag IRQ Mask */
90#define RX_MASK (RSEIM | RBFIM)
91
92/* IRIF_CRC0 */
93#define CRC_RST (1 << 15) /* CRC Engine Reset */
94#define CRC_CT_MASK 0x0FFF
95
96/************************************************************************
97
98
99 structure
100
101
102************************************************************************/
103struct sh_sir_self {
104 void __iomem *membase;
105 unsigned int irq;
106 struct clk *clk;
107
108 struct net_device *ndev;
109
110 struct irlap_cb *irlap;
111 struct qos_info qos;
112
113 iobuff_t tx_buff;
114 iobuff_t rx_buff;
115};
116
117/************************************************************************
118
119
120 common function
121
122
123************************************************************************/
124static void sh_sir_write(struct sh_sir_self *self, u32 offset, u16 data)
125{
126 iowrite16(data, self->membase + offset);
127}
128
129static u16 sh_sir_read(struct sh_sir_self *self, u32 offset)
130{
131 return ioread16(self->membase + offset);
132}
133
134static void sh_sir_update_bits(struct sh_sir_self *self, u32 offset,
135 u16 mask, u16 data)
136{
137 u16 old, new;
138
139 old = sh_sir_read(self, offset);
140 new = (old & ~mask) | data;
141 if (old != new)
142 sh_sir_write(self, offset, new);
143}
144
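sh_sir_update_bits() is a read-modify-write accessor that skips the register write when nothing changes. A hypothetical usage sketch built from the register and bit names defined above:

```c
/* Hypothetical helper (not part of the driver): toggle the UART receive
 * enable bit without disturbing the rest of IRIF_UART0. */
static void sh_sir_rx_enable(struct sh_sir_self *self, int enable)
{
	sh_sir_update_bits(self, IRIF_UART0, RIE, enable ? RIE : 0);
}
```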
145/************************************************************************
146
147
148 CRC function
149
150
151************************************************************************/
152static void sh_sir_crc_reset(struct sh_sir_self *self)
153{
154 sh_sir_write(self, IRIF_CRC0, CRC_RST);
155}
156
157static void sh_sir_crc_add(struct sh_sir_self *self, u8 data)
158{
159 sh_sir_write(self, IRIF_CRC1, (u16)data);
160}
161
162static u16 sh_sir_crc_cnt(struct sh_sir_self *self)
163{
164 return CRC_CT_MASK & sh_sir_read(self, IRIF_CRC0);
165}
166
167static u16 sh_sir_crc_out(struct sh_sir_self *self)
168{
169 return sh_sir_read(self, IRIF_CRC4);
170}
171
172static int sh_sir_crc_init(struct sh_sir_self *self)
173{
174 struct device *dev = &self->ndev->dev;
175 int ret = -EIO;
176 u16 val;
177
178 sh_sir_crc_reset(self);
179
180 sh_sir_crc_add(self, 0xCC);
181 sh_sir_crc_add(self, 0xF5);
182 sh_sir_crc_add(self, 0xF1);
183 sh_sir_crc_add(self, 0xA7);
184
185 val = sh_sir_crc_cnt(self);
186 if (4 != val) {
187 dev_err(dev, "CRC count error %x\n", val);
188 goto crc_init_out;
189 }
190
191 val = sh_sir_crc_out(self);
192 if (0x51DF != val) {
193 dev_err(dev, "CRC result error %x\n", val);
194 goto crc_init_out;
195 }
196
197 ret = 0;
198
199crc_init_out:
200
201 sh_sir_crc_reset(self);
202 return ret;
203}
204
205/************************************************************************
206
207
208 baud rate functions
209
210
211************************************************************************/
212#define SCLK_BASE 1843200 /* 1.8432MHz */
213
214static u32 sh_sir_find_sclk(struct clk *irda_clk)
215{
216 struct cpufreq_frequency_table *freq_table = irda_clk->freq_table;
217 struct clk *pclk = clk_get(NULL, "peripheral_clk");
218 u32 limit, min = 0xffffffff, tmp;
219 int i, index = 0;
220
221 limit = clk_get_rate(pclk);
222 clk_put(pclk);
223
224 /* IrDA cannot run faster than peripheral_clk */
225 for (i = 0;
226 freq_table[i].frequency != CPUFREQ_TABLE_END;
227 i++) {
228 u32 freq = freq_table[i].frequency;
229
230 if (freq == CPUFREQ_ENTRY_INVALID)
231 continue;
232
233 /* skip clock rates above peripheral_clk */
234 if (freq > limit)
235 continue;
236
237 tmp = freq % SCLK_BASE;
238 if (tmp < min) {
239 min = tmp;
240 index = i;
241 }
242 }
243
244 return freq_table[index].frequency;
245}
246
247#define ERR_ROUNDING(a) (((a) + 5000) / 10000)
248static int sh_sir_set_baudrate(struct sh_sir_self *self, u32 baudrate)
249{
250 struct clk *clk;
251 struct device *dev = &self->ndev->dev;
252 u32 rate;
253 u16 uabca, uabc;
254 u16 irbca, irbc;
255 u32 min, rerr, tmp;
256 int i;
257
258 /* Baud Rate Error Correction x 10000 */
259 u32 rate_err_array[] = {
260 0000, 0625, 1250, 1875,
261 2500, 3125, 3750, 4375,
262 5000, 5625, 6250, 6875,
263 7500, 8125, 8750, 9375,
264 };
265
266 /*
267 * FIXME
268 *
269 * it support 9600 only now
270 */
271 switch (baudrate) {
272 case 9600:
273 break;
274 default:
275 dev_err(dev, "unsupported baudrate %d\n", baudrate);
276 return -EIO;
277 }
278
279 clk = clk_get(NULL, "irda_clk");
280 if (!clk) {
281 dev_err(dev, "can not get irda_clk\n");
282 return -EIO;
283 }
284
285 clk_set_rate(clk, sh_sir_find_sclk(clk));
286 rate = clk_get_rate(clk);
287 clk_put(clk);
288
289 dev_dbg(dev, "selected sclk = %d\n", rate);
290
291 /*
292 * CALCULATION
293 *
294 * 1843200 = system rate / (irbca + (irbc + 1))
295 */
296
297 irbc = rate / SCLK_BASE;
298
299 tmp = rate - (SCLK_BASE * irbc);
300 tmp *= 10000;
301
302 rerr = tmp / SCLK_BASE;
303
304 min = 0xffffffff;
305 irbca = 0;
306 for (i = 0; i < ARRAY_SIZE(rate_err_array); i++) {
307 tmp = abs(rate_err_array[i] - rerr);
308 if (min > tmp) {
309 min = tmp;
310 irbca = i;
311 }
312 }
313
314 tmp = rate / (irbc + ERR_ROUNDING(rate_err_array[irbca]));
315 if ((SCLK_BASE / 100) < abs(tmp - SCLK_BASE))
316 dev_warn(dev, "IrDA freq error margin over %d\n", tmp);
317
318 dev_dbg(dev, "target = %d, result = %d, infrared = %d.%d\n",
319 SCLK_BASE, tmp, irbc, rate_err_array[irbca]);
320
321 irbca = (irbca & 0xF) << 4;
322 irbc = (irbc - 1) & 0xF;
323
324 if (!irbc) {
325 dev_err(dev, "sh_sir can not set 0 in IRIF_SIR2\n");
326 return -EIO;
327 }
328
329 sh_sir_write(self, IRIF_SIR0, IRTPW | IRERRC);
330 sh_sir_write(self, IRIF_SIR1, irbca);
331 sh_sir_write(self, IRIF_SIR2, irbc);
332
333 /*
334 * CALCULATION
335 *
336 * BaudRate[bps] = system rate / (uabca + (uabc + 1) x 16)
337 */
338
339 uabc = rate / baudrate;
340 uabc = (uabc / 16) - 1;
341 uabc = (uabc + 1) * 16;
342
343 tmp = rate - (uabc * baudrate);
344 tmp *= 10000;
345
346 rerr = tmp / baudrate;
347
348 min = 0xffffffff;
349 uabca = 0;
350 for (i = 0; i < ARRAY_SIZE(rate_err_array); i++) {
351 tmp = abs(rate_err_array[i] - rerr);
352 if (min > tmp) {
353 min = tmp;
354 uabca = i;
355 }
356 }
357
358 tmp = rate / (uabc + ERR_ROUNDING(rate_err_array[uabca]));
359 if ((baudrate / 100) < abs(tmp - baudrate))
360 dev_warn(dev, "UART freq error margin over %d\n", tmp);
361
362 dev_dbg(dev, "target = %d, result = %d, uart = %d.%d\n",
363 baudrate, tmp,
364 uabc, rate_err_array[uabca]);
365
366 uabca = (uabca & 0xF) << 4;
367 uabc = (uabc / 16) - 1;
368
369 sh_sir_write(self, IRIF_UART6, uabca);
370 sh_sir_write(self, IRIF_UART7, uabc);
371
372 return 0;
373}
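As a worked example of the UART divider math above, assume the selected source clock comes out at exactly SCLK_BASE (1.8432 MHz): the divider is 192, which is already a multiple of 16, the residual error is 0 so correction index 0 is used, and the registers end up as IRIF_UART7 = 11, IRIF_UART6 = 0. Note also that ERR_ROUNDING() rounds the x10000 fixed-point correction to the nearest integer, e.g. ERR_ROUNDING(6250) is 1 while ERR_ROUNDING(3125) is 0. A host-side sketch of that arithmetic, not part of the driver:

#include <stdio.h>

/* Worked example of the divider math in sh_sir_set_baudrate(),
 * assuming the source clock is exactly 1.8432 MHz. */
int main(void)
{
	unsigned int rate = 1843200, baudrate = 9600;
	unsigned int uabc, rerr;

	uabc = rate / baudrate;		/* 192 */
	uabc = (uabc / 16) - 1;		/* 11 */
	uabc = (uabc + 1) * 16;		/* 192 again: rounded down to a multiple of 16 */

	/* residual error in units of 1/10000: 0, so correction index 0 is chosen */
	rerr = (rate - uabc * baudrate) * 10000 / baudrate;

	printf("IRIF_UART7 = %u, IRIF_UART6 = %u, residual = %u/10000\n",
	       (uabc / 16) - 1, 0u, rerr);	/* prints 11, 0, 0 */
	return 0;
}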
374
375/************************************************************************
376
377
378 iobuf function
379
380
381************************************************************************/
382static int __sh_sir_init_iobuf(iobuff_t *io, int size)
383{
384 io->head = kmalloc(size, GFP_KERNEL);
385 if (!io->head)
386 return -ENOMEM;
387
388 io->truesize = size;
389 io->in_frame = FALSE;
390 io->state = OUTSIDE_FRAME;
391 io->data = io->head;
392
393 return 0;
394}
395
396static void sh_sir_remove_iobuf(struct sh_sir_self *self)
397{
398 kfree(self->rx_buff.head);
399 kfree(self->tx_buff.head);
400
401 self->rx_buff.head = NULL;
402 self->tx_buff.head = NULL;
403}
404
405static int sh_sir_init_iobuf(struct sh_sir_self *self, int rxsize, int txsize)
406{
407 int err = -ENOMEM;
408
409 if (self->rx_buff.head ||
410 self->tx_buff.head) {
411 dev_err(&self->ndev->dev, "iobuff has already existed.");
412 return err;
413 }
414
415 err = __sh_sir_init_iobuf(&self->rx_buff, rxsize);
416 if (err)
417 goto iobuf_err;
418
419 err = __sh_sir_init_iobuf(&self->tx_buff, txsize);
420
421iobuf_err:
422 if (err)
423 sh_sir_remove_iobuf(self);
424
425 return err;
426}
427
428/************************************************************************
429
430
431 status function
432
433
434************************************************************************/
435static void sh_sir_clear_all_err(struct sh_sir_self *self)
436{
437 /* Clear error flag for receive pulse width */
438 sh_sir_update_bits(self, IRIF_SIR0, IRERRC, IRERRC);
439
440 /* Clear frame / EOF error flag */
441 sh_sir_write(self, IRIF_SIR_FLG, 0xffff);
442
 443 /* Clear all status errors */
444 sh_sir_write(self, IRIF_UART_STS2, 0);
445}
446
447static void sh_sir_set_phase(struct sh_sir_self *self, int phase)
448{
449 u16 uart5 = 0;
450 u16 uart0 = 0;
451
452 switch (phase) {
453 case TX_PHASE:
454 uart5 = TBEIM;
455 uart0 = TBEC | TIE;
456 break;
457 case TX_COMP_PHASE:
458 uart5 = TSBEIM;
459 uart0 = TIE;
460 break;
461 case RX_PHASE:
462 uart5 = RX_MASK;
463 uart0 = RIE;
464 break;
465 default:
466 break;
467 }
468
469 sh_sir_write(self, IRIF_UART5, uart5);
470 sh_sir_write(self, IRIF_UART0, uart0);
471}
472
473static int sh_sir_is_which_phase(struct sh_sir_self *self)
474{
475 u16 val = sh_sir_read(self, IRIF_UART5);
476
477 if (val & TBEIM)
478 return TX_PHASE;
479
480 if (val & TSBEIM)
481 return TX_COMP_PHASE;
482
483 if (val & RX_MASK)
484 return RX_PHASE;
485
486 return NONE_PHASE;
487}
488
489static void sh_sir_tx(struct sh_sir_self *self, int phase)
490{
491 switch (phase) {
492 case TX_PHASE:
493 if (0 >= self->tx_buff.len) {
494 sh_sir_set_phase(self, TX_COMP_PHASE);
495 } else {
496 sh_sir_write(self, IRIF_UART3, self->tx_buff.data[0]);
497 self->tx_buff.len--;
498 self->tx_buff.data++;
499 }
500 break;
501 case TX_COMP_PHASE:
502 sh_sir_set_phase(self, RX_PHASE);
503 netif_wake_queue(self->ndev);
504 break;
505 default:
506 dev_err(&self->ndev->dev, "should not happen\n");
507 break;
508 }
509}
510
511static int sh_sir_read_data(struct sh_sir_self *self)
512{
513 u16 val;
514 int timeout = 1024;
515
516 while (timeout--) {
517 val = sh_sir_read(self, IRIF_UART1);
518
 519 /* data received */
520 if (val & RBF) {
521 if (val & (URSME | UROVE | URFRE | URPRE))
522 break;
523
524 return (int)sh_sir_read(self, IRIF_UART4);
525 }
526
527 udelay(1);
528 }
529
530 dev_err(&self->ndev->dev, "UART1 %04x : STATUS %04x\n",
531 val, sh_sir_read(self, IRIF_UART_STS2));
532
 533 /* read the data register to clear the error */
534 sh_sir_read(self, IRIF_UART4);
535
536 return -1;
537}
538
539static void sh_sir_rx(struct sh_sir_self *self)
540{
541 int timeout = 1024;
542 int data;
543
544 while (timeout--) {
545 data = sh_sir_read_data(self);
546 if (data < 0)
547 break;
548
549 async_unwrap_char(self->ndev, &self->ndev->stats,
550 &self->rx_buff, (u8)data);
551 self->ndev->last_rx = jiffies;
552
553 if (EOFD & sh_sir_read(self, IRIF_SIR_FRM))
554 continue;
555
556 break;
557 }
558}
559
560static irqreturn_t sh_sir_irq(int irq, void *dev_id)
561{
562 struct sh_sir_self *self = dev_id;
563 struct device *dev = &self->ndev->dev;
564 int phase = sh_sir_is_which_phase(self);
565
566 switch (phase) {
567 case TX_COMP_PHASE:
568 case TX_PHASE:
569 sh_sir_tx(self, phase);
570 break;
571 case RX_PHASE:
572 if (sh_sir_read(self, IRIF_SIR3))
573 dev_err(dev, "rcv pulse width error occurred\n");
574
575 sh_sir_rx(self);
576 sh_sir_clear_all_err(self);
577 break;
578 default:
579 dev_err(dev, "unknown interrupt\n");
580 }
581
582 return IRQ_HANDLED;
583}
584
585/************************************************************************
586
587
588 net_device_ops function
589
590
591************************************************************************/
592static int sh_sir_hard_xmit(struct sk_buff *skb, struct net_device *ndev)
593{
594 struct sh_sir_self *self = netdev_priv(ndev);
595 int speed = irda_get_next_speed(skb);
596
597 if ((0 < speed) &&
598 (9600 != speed)) {
599 dev_err(&ndev->dev, "support 9600 only (%d)\n", speed);
600 return -EIO;
601 }
602
603 netif_stop_queue(ndev);
604
605 self->tx_buff.data = self->tx_buff.head;
606 self->tx_buff.len = 0;
607 if (skb->len)
608 self->tx_buff.len = async_wrap_skb(skb, self->tx_buff.data,
609 self->tx_buff.truesize);
610
611 sh_sir_set_phase(self, TX_PHASE);
612 dev_kfree_skb(skb);
613
614 return 0;
615}
616
617static int sh_sir_ioctl(struct net_device *ndev, struct ifreq *ifreq, int cmd)
618{
619 /*
620 * FIXME
621 *
 622 * This function is required by the IrDA framework,
 623 * but there is nothing to do here yet.
624 */
625 return 0;
626}
627
628static struct net_device_stats *sh_sir_stats(struct net_device *ndev)
629{
630 struct sh_sir_self *self = netdev_priv(ndev);
631
632 return &self->ndev->stats;
633}
634
635static int sh_sir_open(struct net_device *ndev)
636{
637 struct sh_sir_self *self = netdev_priv(ndev);
638 int err;
639
640 clk_enable(self->clk);
641 err = sh_sir_crc_init(self);
642 if (err)
643 goto open_err;
644
645 sh_sir_set_baudrate(self, 9600);
646
647 self->irlap = irlap_open(ndev, &self->qos, DRIVER_NAME);
648 if (!self->irlap)
649 goto open_err;
650
651 /*
652 * Now enable the interrupt then start the queue
653 */
654 sh_sir_update_bits(self, IRIF_SIR_FRM, FRP, FRP);
655 sh_sir_read(self, IRIF_UART1); /* flag clear */
656 sh_sir_read(self, IRIF_UART4); /* flag clear */
657 sh_sir_set_phase(self, RX_PHASE);
658
659 netif_start_queue(ndev);
660
661 dev_info(&self->ndev->dev, "opened\n");
662
663 return 0;
664
665open_err:
666 clk_disable(self->clk);
667
668 return err;
669}
670
671static int sh_sir_stop(struct net_device *ndev)
672{
673 struct sh_sir_self *self = netdev_priv(ndev);
674
675 /* Stop IrLAP */
676 if (self->irlap) {
677 irlap_close(self->irlap);
678 self->irlap = NULL;
679 }
680
681 netif_stop_queue(ndev);
682
683 dev_info(&ndev->dev, "stoped\n");
684
685 return 0;
686}
687
688static const struct net_device_ops sh_sir_ndo = {
689 .ndo_open = sh_sir_open,
690 .ndo_stop = sh_sir_stop,
691 .ndo_start_xmit = sh_sir_hard_xmit,
692 .ndo_do_ioctl = sh_sir_ioctl,
693 .ndo_get_stats = sh_sir_stats,
694};
695
696/************************************************************************
697
698
699 platform_driver function
700
701
702************************************************************************/
703static int __devinit sh_sir_probe(struct platform_device *pdev)
704{
705 struct net_device *ndev;
706 struct sh_sir_self *self;
707 struct resource *res;
708 char clk_name[8];
709 void __iomem *base;
 710 int irq;
711 int err = -ENOMEM;
712
713 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
714 irq = platform_get_irq(pdev, 0);
715 if (!res || irq < 0) {
716 dev_err(&pdev->dev, "Not enough platform resources.\n");
717 goto exit;
718 }
719
720 ndev = alloc_irdadev(sizeof(*self));
721 if (!ndev)
722 goto exit;
723
724 base = ioremap_nocache(res->start, resource_size(res));
725 if (!base) {
726 err = -ENXIO;
727 dev_err(&pdev->dev, "Unable to ioremap.\n");
728 goto err_mem_1;
729 }
730
731 self = netdev_priv(ndev);
732 err = sh_sir_init_iobuf(self, IRDA_SKB_MAX_MTU, IRDA_SIR_MAX_FRAME);
733 if (err)
734 goto err_mem_2;
735
736 snprintf(clk_name, sizeof(clk_name), "irda%d", pdev->id);
737 self->clk = clk_get(&pdev->dev, clk_name);
738 if (IS_ERR(self->clk)) {
739 dev_err(&pdev->dev, "cannot get clock \"%s\"\n", clk_name);
740 goto err_mem_3;
741 }
742
743 irda_init_max_qos_capabilies(&self->qos);
744
745 ndev->netdev_ops = &sh_sir_ndo;
746 ndev->irq = irq;
747
748 self->membase = base;
749 self->ndev = ndev;
750 self->qos.baud_rate.bits &= IR_9600; /* FIXME */
751 self->qos.min_turn_time.bits = 1; /* 10 ms or more */
752
753 irda_qos_bits_to_value(&self->qos);
754
755 err = register_netdev(ndev);
756 if (err)
757 goto err_mem_4;
758
759 platform_set_drvdata(pdev, ndev);
760
761 if (request_irq(irq, sh_sir_irq, IRQF_DISABLED, "sh_sir", self)) {
762 dev_warn(&pdev->dev, "Unable to attach sh_sir interrupt\n");
763 goto err_mem_4;
764 }
765
766 dev_info(&pdev->dev, "SuperH IrDA probed\n");
767
768 goto exit;
769
770err_mem_4:
771 clk_put(self->clk);
772err_mem_3:
773 sh_sir_remove_iobuf(self);
774err_mem_2:
775 iounmap(self->membase);
776err_mem_1:
777 free_netdev(ndev);
778exit:
779 return err;
780}
781
782static int __devexit sh_sir_remove(struct platform_device *pdev)
783{
784 struct net_device *ndev = platform_get_drvdata(pdev);
785 struct sh_sir_self *self = netdev_priv(ndev);
786
787 if (!self)
788 return 0;
789
790 unregister_netdev(ndev);
791 clk_put(self->clk);
792 sh_sir_remove_iobuf(self);
793 iounmap(self->membase);
794 free_netdev(ndev);
795 platform_set_drvdata(pdev, NULL);
796
797 return 0;
798}
799
800static struct platform_driver sh_sir_driver = {
801 .probe = sh_sir_probe,
802 .remove = __devexit_p(sh_sir_remove),
803 .driver = {
804 .name = DRIVER_NAME,
805 },
806};
807
808static int __init sh_sir_init(void)
809{
810 return platform_driver_register(&sh_sir_driver);
811}
812
813static void __exit sh_sir_exit(void)
814{
815 platform_driver_unregister(&sh_sir_driver);
816}
817
818module_init(sh_sir_init);
819module_exit(sh_sir_exit);
820
821MODULE_AUTHOR("Kuninori Morimoto <morimoto.kuninori@renesas.com>");
822MODULE_DESCRIPTION("SuperH IrDA driver");
823MODULE_LICENSE("GPL");
diff --git a/drivers/net/irda/via-ircc.c b/drivers/net/irda/via-ircc.c
index fddb4efd545..6533c010cf5 100644
--- a/drivers/net/irda/via-ircc.c
+++ b/drivers/net/irda/via-ircc.c
@@ -121,7 +121,7 @@ static void iodelay(int udelay)
121 } 121 }
122} 122}
123 123
124static struct pci_device_id via_pci_tbl[] = { 124static DEFINE_PCI_DEVICE_TABLE(via_pci_tbl) = {
125 { PCI_VENDOR_ID_VIA, 0x8231, PCI_ANY_ID, PCI_ANY_ID,0,0,0 }, 125 { PCI_VENDOR_ID_VIA, 0x8231, PCI_ANY_ID, PCI_ANY_ID,0,0,0 },
126 { PCI_VENDOR_ID_VIA, 0x3109, PCI_ANY_ID, PCI_ANY_ID,0,0,1 }, 126 { PCI_VENDOR_ID_VIA, 0x3109, PCI_ANY_ID, PCI_ANY_ID,0,0,1 },
127 { PCI_VENDOR_ID_VIA, 0x3074, PCI_ANY_ID, PCI_ANY_ID,0,0,2 }, 127 { PCI_VENDOR_ID_VIA, 0x3074, PCI_ANY_ID, PCI_ANY_ID,0,0,2 },
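Several drivers touched below (via-ircc, vlsi_ir, ixgb, ixgbe) make the same mechanical change: the open-coded static struct pci_device_id table becomes DEFINE_PCI_DEVICE_TABLE(...). In kernels of this vintage the macro adds const and the __devinitconst section annotation; approximately (not quoted from this diff):

/* approximate definition from include/linux/pci.h of this era */
#define DEFINE_PCI_DEVICE_TABLE(_table) \
	const struct pci_device_id _table[] __devinitconst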
diff --git a/drivers/net/irda/vlsi_ir.c b/drivers/net/irda/vlsi_ir.c
index bd3c6b5ee76..209d4bcface 100644
--- a/drivers/net/irda/vlsi_ir.c
+++ b/drivers/net/irda/vlsi_ir.c
@@ -59,7 +59,7 @@ MODULE_LICENSE("GPL");
59 59
60static /* const */ char drivername[] = DRIVER_NAME; 60static /* const */ char drivername[] = DRIVER_NAME;
61 61
62static struct pci_device_id vlsi_irda_table [] = { 62static DEFINE_PCI_DEVICE_TABLE(vlsi_irda_table) = {
63 { 63 {
64 .class = PCI_CLASS_WIRELESS_IRDA << 8, 64 .class = PCI_CLASS_WIRELESS_IRDA << 8,
65 .class_mask = PCI_CLASS_SUBCLASS_MASK << 8, 65 .class_mask = PCI_CLASS_SUBCLASS_MASK << 8,
diff --git a/drivers/net/ixgb/ixgb_main.c b/drivers/net/ixgb/ixgb_main.c
index 593d1a4f217..c56ea69762c 100644
--- a/drivers/net/ixgb/ixgb_main.c
+++ b/drivers/net/ixgb/ixgb_main.c
@@ -50,7 +50,7 @@ MODULE_PARM_DESC(copybreak,
50 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, 50 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
51 * Class, Class Mask, private data (not used) } 51 * Class, Class Mask, private data (not used) }
52 */ 52 */
53static struct pci_device_id ixgb_pci_tbl[] = { 53static DEFINE_PCI_DEVICE_TABLE(ixgb_pci_tbl) = {
54 {INTEL_VENDOR_ID, IXGB_DEVICE_ID_82597EX, 54 {INTEL_VENDOR_ID, IXGB_DEVICE_ID_82597EX,
55 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, 55 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
56 {INTEL_VENDOR_ID, IXGB_DEVICE_ID_82597EX_CX4, 56 {INTEL_VENDOR_ID, IXGB_DEVICE_ID_82597EX_CX4,
diff --git a/drivers/net/ixgbe/Makefile b/drivers/net/ixgbe/Makefile
index bfef0ebcba9..8f81efb4916 100644
--- a/drivers/net/ixgbe/Makefile
+++ b/drivers/net/ixgbe/Makefile
@@ -33,7 +33,8 @@
33obj-$(CONFIG_IXGBE) += ixgbe.o 33obj-$(CONFIG_IXGBE) += ixgbe.o
34 34
35ixgbe-objs := ixgbe_main.o ixgbe_common.o ixgbe_ethtool.o \ 35ixgbe-objs := ixgbe_main.o ixgbe_common.o ixgbe_ethtool.o \
36 ixgbe_82599.o ixgbe_82598.o ixgbe_phy.o 36 ixgbe_82599.o ixgbe_82598.o ixgbe_phy.o ixgbe_sriov.o \
37 ixgbe_mbx.o
37 38
38ixgbe-$(CONFIG_IXGBE_DCB) += ixgbe_dcb.o ixgbe_dcb_82598.o \ 39ixgbe-$(CONFIG_IXGBE_DCB) += ixgbe_dcb.o ixgbe_dcb_82598.o \
39 ixgbe_dcb_82599.o ixgbe_dcb_nl.o 40 ixgbe_dcb_82599.o ixgbe_dcb_nl.o
diff --git a/drivers/net/ixgbe/ixgbe.h b/drivers/net/ixgbe/ixgbe.h
index 303e7bd39b6..e576fb4740b 100644
--- a/drivers/net/ixgbe/ixgbe.h
+++ b/drivers/net/ixgbe/ixgbe.h
@@ -98,6 +98,22 @@
98 98
99#define IXGBE_MAX_RSC_INT_RATE 162760 99#define IXGBE_MAX_RSC_INT_RATE 162760
100 100
101#define IXGBE_MAX_VF_MC_ENTRIES 30
102#define IXGBE_MAX_VF_FUNCTIONS 64
103#define IXGBE_MAX_VFTA_ENTRIES 128
104#define MAX_EMULATION_MAC_ADDRS 16
105#define VMDQ_P(p) ((p) + adapter->num_vfs)
106
107struct vf_data_storage {
108 unsigned char vf_mac_addresses[ETH_ALEN];
109 u16 vf_mc_hashes[IXGBE_MAX_VF_MC_ENTRIES];
110 u16 num_vf_mc_hashes;
111 u16 default_vf_vlan_id;
112 u16 vlans_enabled;
113 bool clear_to_send;
114 int rar;
115};
116
101/* wrapper around a pointer to a socket buffer, 117/* wrapper around a pointer to a socket buffer,
102 * so a DMA handle can be stored along with the buffer */ 118 * so a DMA handle can be stored along with the buffer */
103struct ixgbe_tx_buffer { 119struct ixgbe_tx_buffer {
@@ -171,7 +187,7 @@ struct ixgbe_ring {
171enum ixgbe_ring_f_enum { 187enum ixgbe_ring_f_enum {
172 RING_F_NONE = 0, 188 RING_F_NONE = 0,
173 RING_F_DCB, 189 RING_F_DCB,
174 RING_F_VMDQ, 190 RING_F_VMDQ, /* SR-IOV uses the same ring feature */
175 RING_F_RSS, 191 RING_F_RSS,
176 RING_F_FDIR, 192 RING_F_FDIR,
177#ifdef IXGBE_FCOE 193#ifdef IXGBE_FCOE
@@ -183,7 +199,7 @@ enum ixgbe_ring_f_enum {
183 199
184#define IXGBE_MAX_DCB_INDICES 8 200#define IXGBE_MAX_DCB_INDICES 8
185#define IXGBE_MAX_RSS_INDICES 16 201#define IXGBE_MAX_RSS_INDICES 16
186#define IXGBE_MAX_VMDQ_INDICES 16 202#define IXGBE_MAX_VMDQ_INDICES 64
187#define IXGBE_MAX_FDIR_INDICES 64 203#define IXGBE_MAX_FDIR_INDICES 64
188#ifdef IXGBE_FCOE 204#ifdef IXGBE_FCOE
189#define IXGBE_MAX_FCOE_INDICES 8 205#define IXGBE_MAX_FCOE_INDICES 8
@@ -288,6 +304,8 @@ struct ixgbe_adapter {
288 /* RX */ 304 /* RX */
289 struct ixgbe_ring *rx_ring ____cacheline_aligned_in_smp; /* One per active queue */ 305 struct ixgbe_ring *rx_ring ____cacheline_aligned_in_smp; /* One per active queue */
290 int num_rx_queues; 306 int num_rx_queues;
307 int num_rx_pools; /* == num_rx_queues in 82598 */
308 int num_rx_queues_per_pool; /* 1 if 82598, can be many if 82599 */
291 u64 hw_csum_rx_error; 309 u64 hw_csum_rx_error;
292 u64 hw_rx_no_dma_resources; 310 u64 hw_rx_no_dma_resources;
293 u64 non_eop_descs; 311 u64 non_eop_descs;
@@ -330,6 +348,8 @@ struct ixgbe_adapter {
330#define IXGBE_FLAG_FDIR_PERFECT_CAPABLE (u32)(1 << 27) 348#define IXGBE_FLAG_FDIR_PERFECT_CAPABLE (u32)(1 << 27)
331#define IXGBE_FLAG_FCOE_CAPABLE (u32)(1 << 28) 349#define IXGBE_FLAG_FCOE_CAPABLE (u32)(1 << 28)
332#define IXGBE_FLAG_FCOE_ENABLED (u32)(1 << 29) 350#define IXGBE_FLAG_FCOE_ENABLED (u32)(1 << 29)
351#define IXGBE_FLAG_SRIOV_CAPABLE (u32)(1 << 30)
352#define IXGBE_FLAG_SRIOV_ENABLED (u32)(1 << 31)
333 353
334 u32 flags2; 354 u32 flags2;
335#define IXGBE_FLAG2_RSC_CAPABLE (u32)(1) 355#define IXGBE_FLAG2_RSC_CAPABLE (u32)(1)
@@ -379,6 +399,11 @@ struct ixgbe_adapter {
379 u64 rsc_total_flush; 399 u64 rsc_total_flush;
380 u32 wol; 400 u32 wol;
381 u16 eeprom_version; 401 u16 eeprom_version;
402
403 /* SR-IOV */
404 DECLARE_BITMAP(active_vfs, IXGBE_MAX_VF_FUNCTIONS);
405 unsigned int num_vfs;
406 struct vf_data_storage *vfinfo;
382}; 407};
383 408
384enum ixbge_state_t { 409enum ixbge_state_t {
@@ -440,6 +465,7 @@ extern s32 ixgbe_atr_set_flex_byte_82599(struct ixgbe_atr_input *input,
440 u16 flex_byte); 465 u16 flex_byte);
441extern s32 ixgbe_atr_set_l4type_82599(struct ixgbe_atr_input *input, 466extern s32 ixgbe_atr_set_l4type_82599(struct ixgbe_atr_input *input,
442 u8 l4type); 467 u8 l4type);
468extern void ixgbe_set_rx_mode(struct net_device *netdev);
443#ifdef IXGBE_FCOE 469#ifdef IXGBE_FCOE
444extern void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter); 470extern void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter);
445extern int ixgbe_fso(struct ixgbe_adapter *adapter, 471extern int ixgbe_fso(struct ixgbe_adapter *adapter,
diff --git a/drivers/net/ixgbe/ixgbe_82599.c b/drivers/net/ixgbe/ixgbe_82599.c
index b49bd6b9feb..d4ed6adb797 100644
--- a/drivers/net/ixgbe/ixgbe_82599.c
+++ b/drivers/net/ixgbe/ixgbe_82599.c
@@ -31,6 +31,7 @@
31 31
32#include "ixgbe.h" 32#include "ixgbe.h"
33#include "ixgbe_phy.h" 33#include "ixgbe_phy.h"
34#include "ixgbe_mbx.h"
34 35
35#define IXGBE_82599_MAX_TX_QUEUES 128 36#define IXGBE_82599_MAX_TX_QUEUES 128
36#define IXGBE_82599_MAX_RX_QUEUES 128 37#define IXGBE_82599_MAX_RX_QUEUES 128
@@ -889,7 +890,7 @@ static s32 ixgbe_setup_copper_link_82599(struct ixgbe_hw *hw,
889static s32 ixgbe_reset_hw_82599(struct ixgbe_hw *hw) 890static s32 ixgbe_reset_hw_82599(struct ixgbe_hw *hw)
890{ 891{
891 s32 status = 0; 892 s32 status = 0;
892 u32 ctrl, ctrl_ext; 893 u32 ctrl;
893 u32 i; 894 u32 i;
894 u32 autoc; 895 u32 autoc;
895 u32 autoc2; 896 u32 autoc2;
@@ -944,15 +945,9 @@ static s32 ixgbe_reset_hw_82599(struct ixgbe_hw *hw)
944 status = IXGBE_ERR_RESET_FAILED; 945 status = IXGBE_ERR_RESET_FAILED;
945 hw_dbg(hw, "Reset polling failed to complete.\n"); 946 hw_dbg(hw, "Reset polling failed to complete.\n");
946 } 947 }
947 /* Clear PF Reset Done bit so PF/VF Mail Ops can work */
948 ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
949 ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD;
950 IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
951 948
952 msleep(50); 949 msleep(50);
953 950
954
955
956 /* 951 /*
957 * Store the original AUTOC/AUTOC2 values if they have not been 952 * Store the original AUTOC/AUTOC2 values if they have not been
958 * stored off yet. Otherwise restore the stored original 953 * stored off yet. Otherwise restore the stored original
@@ -1095,9 +1090,11 @@ static s32 ixgbe_set_vfta_82599(struct ixgbe_hw *hw, u32 vlan, u32 vind,
1095 bool vlan_on) 1090 bool vlan_on)
1096{ 1091{
1097 u32 regindex; 1092 u32 regindex;
1093 u32 vlvf_index;
1098 u32 bitindex; 1094 u32 bitindex;
1099 u32 bits; 1095 u32 bits;
1100 u32 first_empty_slot; 1096 u32 first_empty_slot;
1097 u32 vt_ctl;
1101 1098
1102 if (vlan > 4095) 1099 if (vlan > 4095)
1103 return IXGBE_ERR_PARAM; 1100 return IXGBE_ERR_PARAM;
@@ -1124,76 +1121,84 @@ static s32 ixgbe_set_vfta_82599(struct ixgbe_hw *hw, u32 vlan, u32 vind,
1124 1121
1125 1122
1126 /* Part 2 1123 /* Part 2
1127 * If the vind is set 1124 * If VT mode is set
1128 * Either vlan_on 1125 * Either vlan_on
1129 * make sure the vlan is in VLVF 1126 * make sure the vlan is in VLVF
1130 * set the vind bit in the matching VLVFB 1127 * set the vind bit in the matching VLVFB
1131 * Or !vlan_on 1128 * Or !vlan_on
1132 * clear the pool bit and possibly the vind 1129 * clear the pool bit and possibly the vind
1133 */ 1130 */
1134 if (vind) { 1131 vt_ctl = IXGBE_READ_REG(hw, IXGBE_VT_CTL);
1135 /* find the vlanid or the first empty slot */ 1132 if (!(vt_ctl & IXGBE_VT_CTL_VT_ENABLE))
1136 first_empty_slot = 0; 1133 goto out;
1137
1138 for (regindex = 1; regindex < IXGBE_VLVF_ENTRIES; regindex++) {
1139 bits = IXGBE_READ_REG(hw, IXGBE_VLVF(regindex));
1140 if (!bits && !first_empty_slot)
1141 first_empty_slot = regindex;
1142 else if ((bits & 0x0FFF) == vlan)
1143 break;
1144 }
1145 1134
1146 if (regindex >= IXGBE_VLVF_ENTRIES) { 1135 /* find the vlanid or the first empty slot */
1147 if (first_empty_slot) 1136 first_empty_slot = 0;
1148 regindex = first_empty_slot; 1137
1149 else { 1138 for (vlvf_index = 1; vlvf_index < IXGBE_VLVF_ENTRIES; vlvf_index++) {
1150 hw_dbg(hw, "No space in VLVF.\n"); 1139 bits = IXGBE_READ_REG(hw, IXGBE_VLVF(vlvf_index));
1151 goto out; 1140 if (!bits && !first_empty_slot)
1152 } 1141 first_empty_slot = vlvf_index;
1142 else if ((bits & 0x0FFF) == vlan)
1143 break;
1144 }
1145
1146 if (vlvf_index >= IXGBE_VLVF_ENTRIES) {
1147 if (first_empty_slot)
1148 vlvf_index = first_empty_slot;
1149 else {
1150 hw_dbg(hw, "No space in VLVF.\n");
1151 goto out;
1153 } 1152 }
1153 }
1154 1154
1155 if (vlan_on) { 1155 if (vlan_on) {
1156 /* set the pool bit */ 1156 /* set the pool bit */
1157 if (vind < 32) { 1157 if (vind < 32) {
1158 bits = IXGBE_READ_REG(hw, 1158 bits = IXGBE_READ_REG(hw,
1159 IXGBE_VLVFB(regindex * 2)); 1159 IXGBE_VLVFB(vlvf_index * 2));
1160 bits |= (1 << vind); 1160 bits |= (1 << vind);
1161 IXGBE_WRITE_REG(hw, 1161 IXGBE_WRITE_REG(hw,
1162 IXGBE_VLVFB(regindex * 2), bits); 1162 IXGBE_VLVFB(vlvf_index * 2), bits);
1163 } else {
1164 bits = IXGBE_READ_REG(hw,
1165 IXGBE_VLVFB((regindex * 2) + 1));
1166 bits |= (1 << vind);
1167 IXGBE_WRITE_REG(hw,
1168 IXGBE_VLVFB((regindex * 2) + 1), bits);
1169 }
1170 } else { 1163 } else {
1171 /* clear the pool bit */ 1164 bits = IXGBE_READ_REG(hw,
1172 if (vind < 32) { 1165 IXGBE_VLVFB((vlvf_index * 2) + 1));
1173 bits = IXGBE_READ_REG(hw, 1166 bits |= (1 << (vind - 32));
1174 IXGBE_VLVFB(regindex * 2)); 1167 IXGBE_WRITE_REG(hw,
1168 IXGBE_VLVFB((vlvf_index * 2) + 1), bits);
1169 }
1170 } else {
1171 /* clear the pool bit */
1172 if (vind < 32) {
1173 bits = IXGBE_READ_REG(hw,
1174 IXGBE_VLVFB(vlvf_index * 2));
1175 bits &= ~(1 << vind); 1175 bits &= ~(1 << vind);
1176 IXGBE_WRITE_REG(hw, 1176 IXGBE_WRITE_REG(hw,
1177 IXGBE_VLVFB(regindex * 2), bits); 1177 IXGBE_VLVFB(vlvf_index * 2), bits);
1178 bits |= IXGBE_READ_REG(hw, 1178 bits |= IXGBE_READ_REG(hw,
1179 IXGBE_VLVFB((regindex * 2) + 1)); 1179 IXGBE_VLVFB((vlvf_index * 2) + 1));
1180 } else { 1180 } else {
1181 bits = IXGBE_READ_REG(hw, 1181 bits = IXGBE_READ_REG(hw,
1182 IXGBE_VLVFB((regindex * 2) + 1)); 1182 IXGBE_VLVFB((vlvf_index * 2) + 1));
1183 bits &= ~(1 << vind); 1183 bits &= ~(1 << (vind - 32));
1184 IXGBE_WRITE_REG(hw, 1184 IXGBE_WRITE_REG(hw,
1185 IXGBE_VLVFB((regindex * 2) + 1), bits); 1185 IXGBE_VLVFB((vlvf_index * 2) + 1), bits);
1186 bits |= IXGBE_READ_REG(hw, 1186 bits |= IXGBE_READ_REG(hw,
1187 IXGBE_VLVFB(regindex * 2)); 1187 IXGBE_VLVFB(vlvf_index * 2));
1188 }
1189 } 1188 }
1189 }
1190 1190
1191 if (bits) 1191 if (bits) {
1192 IXGBE_WRITE_REG(hw, IXGBE_VLVF(regindex), 1192 IXGBE_WRITE_REG(hw, IXGBE_VLVF(vlvf_index),
1193 (IXGBE_VLVF_VIEN | vlan)); 1193 (IXGBE_VLVF_VIEN | vlan));
1194 else 1194 /* if bits is non-zero then some pools/VFs are still
1195 IXGBE_WRITE_REG(hw, IXGBE_VLVF(regindex), 0); 1195 * using this VLAN ID. Force the VFTA entry to on */
1196 bits = IXGBE_READ_REG(hw, IXGBE_VFTA(regindex));
1197 bits |= (1 << bitindex);
1198 IXGBE_WRITE_REG(hw, IXGBE_VFTA(regindex), bits);
1196 } 1199 }
1200 else
1201 IXGBE_WRITE_REG(hw, IXGBE_VLVF(vlvf_index), 0);
1197 1202
1198out: 1203out:
1199 return 0; 1204 return 0;
@@ -2655,4 +2660,5 @@ struct ixgbe_info ixgbe_82599_info = {
2655 .mac_ops = &mac_ops_82599, 2660 .mac_ops = &mac_ops_82599,
2656 .eeprom_ops = &eeprom_ops_82599, 2661 .eeprom_ops = &eeprom_ops_82599,
2657 .phy_ops = &phy_ops_82599, 2662 .phy_ops = &phy_ops_82599,
2663 .mbx_ops = &mbx_ops_82599,
2658}; 2664};
diff --git a/drivers/net/ixgbe/ixgbe_common.c b/drivers/net/ixgbe/ixgbe_common.c
index 21f158f79dd..eb49020903c 100644
--- a/drivers/net/ixgbe/ixgbe_common.c
+++ b/drivers/net/ixgbe/ixgbe_common.c
@@ -28,7 +28,6 @@
28#include <linux/pci.h> 28#include <linux/pci.h>
29#include <linux/delay.h> 29#include <linux/delay.h>
30#include <linux/sched.h> 30#include <linux/sched.h>
31#include <linux/list.h>
32#include <linux/netdevice.h> 31#include <linux/netdevice.h>
33 32
34#include "ixgbe.h" 33#include "ixgbe.h"
@@ -1278,19 +1277,11 @@ s32 ixgbe_init_rx_addrs_generic(struct ixgbe_hw *hw)
1278 /* Get the MAC address from the RAR0 for later reference */ 1277 /* Get the MAC address from the RAR0 for later reference */
1279 hw->mac.ops.get_mac_addr(hw, hw->mac.addr); 1278 hw->mac.ops.get_mac_addr(hw, hw->mac.addr);
1280 1279
1281 hw_dbg(hw, " Keeping Current RAR0 Addr =%.2X %.2X %.2X ", 1280 hw_dbg(hw, " Keeping Current RAR0 Addr =%pM\n", hw->mac.addr);
1282 hw->mac.addr[0], hw->mac.addr[1],
1283 hw->mac.addr[2]);
1284 hw_dbg(hw, "%.2X %.2X %.2X\n", hw->mac.addr[3],
1285 hw->mac.addr[4], hw->mac.addr[5]);
1286 } else { 1281 } else {
1287 /* Setup the receive address. */ 1282 /* Setup the receive address. */
1288 hw_dbg(hw, "Overriding MAC Address in RAR[0]\n"); 1283 hw_dbg(hw, "Overriding MAC Address in RAR[0]\n");
1289 hw_dbg(hw, " New MAC Addr =%.2X %.2X %.2X ", 1284 hw_dbg(hw, " New MAC Addr =%pM\n", hw->mac.addr);
1290 hw->mac.addr[0], hw->mac.addr[1],
1291 hw->mac.addr[2]);
1292 hw_dbg(hw, "%.2X %.2X %.2X\n", hw->mac.addr[3],
1293 hw->mac.addr[4], hw->mac.addr[5]);
1294 1285
1295 hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV); 1286 hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
1296 } 1287 }
@@ -1355,7 +1346,7 @@ static void ixgbe_add_uc_addr(struct ixgbe_hw *hw, u8 *addr, u32 vmdq)
1355/** 1346/**
1356 * ixgbe_update_uc_addr_list_generic - Updates MAC list of secondary addresses 1347 * ixgbe_update_uc_addr_list_generic - Updates MAC list of secondary addresses
1357 * @hw: pointer to hardware structure 1348 * @hw: pointer to hardware structure
1358 * @uc_list: the list of new addresses 1349 * @netdev: pointer to net device structure
1359 * 1350 *
1360 * The given list replaces any existing list. Clears the secondary addrs from 1351 * The given list replaces any existing list. Clears the secondary addrs from
1361 * receive address registers. Uses unused receive address registers for the 1352 * receive address registers. Uses unused receive address registers for the
@@ -1365,7 +1356,7 @@ static void ixgbe_add_uc_addr(struct ixgbe_hw *hw, u8 *addr, u32 vmdq)
1365 * manually putting the device into promiscuous mode. 1356 * manually putting the device into promiscuous mode.
1366 **/ 1357 **/
1367s32 ixgbe_update_uc_addr_list_generic(struct ixgbe_hw *hw, 1358s32 ixgbe_update_uc_addr_list_generic(struct ixgbe_hw *hw,
1368 struct list_head *uc_list) 1359 struct net_device *netdev)
1369{ 1360{
1370 u32 i; 1361 u32 i;
1371 u32 old_promisc_setting = hw->addr_ctrl.overflow_promisc; 1362 u32 old_promisc_setting = hw->addr_ctrl.overflow_promisc;
@@ -1389,7 +1380,7 @@ s32 ixgbe_update_uc_addr_list_generic(struct ixgbe_hw *hw,
1389 } 1380 }
1390 1381
1391 /* Add the new addresses */ 1382 /* Add the new addresses */
1392 list_for_each_entry(ha, uc_list, list) { 1383 netdev_for_each_uc_addr(ha, netdev) {
1393 hw_dbg(hw, " Adding the secondary addresses:\n"); 1384 hw_dbg(hw, " Adding the secondary addresses:\n");
1394 ixgbe_add_uc_addr(hw, ha->addr, 0); 1385 ixgbe_add_uc_addr(hw, ha->addr, 0);
1395 } 1386 }
diff --git a/drivers/net/ixgbe/ixgbe_common.h b/drivers/net/ixgbe/ixgbe_common.h
index dfff0ffaa50..13606d4809c 100644
--- a/drivers/net/ixgbe/ixgbe_common.h
+++ b/drivers/net/ixgbe/ixgbe_common.h
@@ -60,7 +60,7 @@ s32 ixgbe_update_mc_addr_list_generic(struct ixgbe_hw *hw, u8 *mc_addr_list,
60 u32 mc_addr_count, 60 u32 mc_addr_count,
61 ixgbe_mc_addr_itr func); 61 ixgbe_mc_addr_itr func);
62s32 ixgbe_update_uc_addr_list_generic(struct ixgbe_hw *hw, 62s32 ixgbe_update_uc_addr_list_generic(struct ixgbe_hw *hw,
63 struct list_head *uc_list); 63 struct net_device *netdev);
64s32 ixgbe_enable_mc_generic(struct ixgbe_hw *hw); 64s32 ixgbe_enable_mc_generic(struct ixgbe_hw *hw);
65s32 ixgbe_disable_mc_generic(struct ixgbe_hw *hw); 65s32 ixgbe_disable_mc_generic(struct ixgbe_hw *hw);
66s32 ixgbe_enable_rx_dma_generic(struct ixgbe_hw *hw, u32 regval); 66s32 ixgbe_enable_rx_dma_generic(struct ixgbe_hw *hw, u32 regval);
diff --git a/drivers/net/ixgbe/ixgbe_ethtool.c b/drivers/net/ixgbe/ixgbe_ethtool.c
index d77961fc75f..1525c86cbcc 100644
--- a/drivers/net/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ixgbe/ixgbe_ethtool.c
@@ -1867,11 +1867,22 @@ static void ixgbe_diag_test(struct net_device *netdev,
1867 if (ixgbe_intr_test(adapter, &data[2])) 1867 if (ixgbe_intr_test(adapter, &data[2]))
1868 eth_test->flags |= ETH_TEST_FL_FAILED; 1868 eth_test->flags |= ETH_TEST_FL_FAILED;
1869 1869
1870 /* If SRIOV or VMDq is enabled then skip MAC
1871 * loopback diagnostic. */
1872 if (adapter->flags & (IXGBE_FLAG_SRIOV_ENABLED |
1873 IXGBE_FLAG_VMDQ_ENABLED)) {
1874 DPRINTK(HW, INFO, "Skip MAC loopback diagnostic in VT "
1875 "mode\n");
1876 data[3] = 0;
1877 goto skip_loopback;
1878 }
1879
1870 ixgbe_reset(adapter); 1880 ixgbe_reset(adapter);
1871 DPRINTK(HW, INFO, "loopback testing starting\n"); 1881 DPRINTK(HW, INFO, "loopback testing starting\n");
1872 if (ixgbe_loopback_test(adapter, &data[3])) 1882 if (ixgbe_loopback_test(adapter, &data[3]))
1873 eth_test->flags |= ETH_TEST_FL_FAILED; 1883 eth_test->flags |= ETH_TEST_FL_FAILED;
1874 1884
1885skip_loopback:
1875 ixgbe_reset(adapter); 1886 ixgbe_reset(adapter);
1876 1887
1877 clear_bit(__IXGBE_TESTING, &adapter->state); 1888 clear_bit(__IXGBE_TESTING, &adapter->state);
diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c
index 7b7c8486c0b..f098816d419 100644
--- a/drivers/net/ixgbe/ixgbe_main.c
+++ b/drivers/net/ixgbe/ixgbe_main.c
@@ -45,6 +45,7 @@
45#include "ixgbe.h" 45#include "ixgbe.h"
46#include "ixgbe_common.h" 46#include "ixgbe_common.h"
47#include "ixgbe_dcb_82599.h" 47#include "ixgbe_dcb_82599.h"
48#include "ixgbe_sriov.h"
48 49
49char ixgbe_driver_name[] = "ixgbe"; 50char ixgbe_driver_name[] = "ixgbe";
50static const char ixgbe_driver_string[] = 51static const char ixgbe_driver_string[] =
@@ -67,7 +68,7 @@ static const struct ixgbe_info *ixgbe_info_tbl[] = {
67 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, 68 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
68 * Class, Class Mask, private data (not used) } 69 * Class, Class Mask, private data (not used) }
69 */ 70 */
70static struct pci_device_id ixgbe_pci_tbl[] = { 71static DEFINE_PCI_DEVICE_TABLE(ixgbe_pci_tbl) = {
71 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598), 72 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598),
72 board_82598 }, 73 board_82598 },
73 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AF_DUAL_PORT), 74 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AF_DUAL_PORT),
@@ -124,6 +125,13 @@ static struct notifier_block dca_notifier = {
124}; 125};
125#endif 126#endif
126 127
128#ifdef CONFIG_PCI_IOV
129static unsigned int max_vfs;
130module_param(max_vfs, uint, 0);
131MODULE_PARM_DESC(max_vfs, "Maximum number of virtual functions to allocate "
132 "per physical function");
133#endif /* CONFIG_PCI_IOV */
134
127MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>"); 135MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
128MODULE_DESCRIPTION("Intel(R) 10 Gigabit PCI Express Network Driver"); 136MODULE_DESCRIPTION("Intel(R) 10 Gigabit PCI Express Network Driver");
129MODULE_LICENSE("GPL"); 137MODULE_LICENSE("GPL");
@@ -131,6 +139,41 @@ MODULE_VERSION(DRV_VERSION);
131 139
132#define DEFAULT_DEBUG_LEVEL_SHIFT 3 140#define DEFAULT_DEBUG_LEVEL_SHIFT 3
133 141
142static inline void ixgbe_disable_sriov(struct ixgbe_adapter *adapter)
143{
144 struct ixgbe_hw *hw = &adapter->hw;
145 u32 gcr;
146 u32 gpie;
147 u32 vmdctl;
148
149#ifdef CONFIG_PCI_IOV
150 /* disable iov and allow time for transactions to clear */
151 pci_disable_sriov(adapter->pdev);
152#endif
153
154 /* turn off device IOV mode */
155 gcr = IXGBE_READ_REG(hw, IXGBE_GCR_EXT);
156 gcr &= ~(IXGBE_GCR_EXT_SRIOV);
157 IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, gcr);
158 gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
159 gpie &= ~IXGBE_GPIE_VTMODE_MASK;
160 IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
161
162 /* set default pool back to 0 */
163 vmdctl = IXGBE_READ_REG(hw, IXGBE_VT_CTL);
164 vmdctl &= ~IXGBE_VT_CTL_POOL_MASK;
165 IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vmdctl);
166
167 /* take a breather then clean up driver data */
168 msleep(100);
169 if (adapter->vfinfo)
170 kfree(adapter->vfinfo);
171 adapter->vfinfo = NULL;
172
173 adapter->num_vfs = 0;
174 adapter->flags &= ~IXGBE_FLAG_SRIOV_ENABLED;
175}
176
134static void ixgbe_release_hw_control(struct ixgbe_adapter *adapter) 177static void ixgbe_release_hw_control(struct ixgbe_adapter *adapter)
135{ 178{
136 u32 ctrl_ext; 179 u32 ctrl_ext;
@@ -1025,7 +1068,12 @@ static void ixgbe_configure_msix(struct ixgbe_adapter *adapter)
1025 1068
1026 /* set up to autoclear timer, and the vectors */ 1069 /* set up to autoclear timer, and the vectors */
1027 mask = IXGBE_EIMS_ENABLE_MASK; 1070 mask = IXGBE_EIMS_ENABLE_MASK;
1028 mask &= ~(IXGBE_EIMS_OTHER | IXGBE_EIMS_LSC); 1071 if (adapter->num_vfs)
1072 mask &= ~(IXGBE_EIMS_OTHER |
1073 IXGBE_EIMS_MAILBOX |
1074 IXGBE_EIMS_LSC);
1075 else
1076 mask &= ~(IXGBE_EIMS_OTHER | IXGBE_EIMS_LSC);
1029 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIAC, mask); 1077 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIAC, mask);
1030} 1078}
1031 1079
@@ -1254,6 +1302,9 @@ static irqreturn_t ixgbe_msix_lsc(int irq, void *data)
1254 if (eicr & IXGBE_EICR_LSC) 1302 if (eicr & IXGBE_EICR_LSC)
1255 ixgbe_check_lsc(adapter); 1303 ixgbe_check_lsc(adapter);
1256 1304
1305 if (eicr & IXGBE_EICR_MAILBOX)
1306 ixgbe_msg_task(adapter);
1307
1257 if (hw->mac.type == ixgbe_mac_82598EB) 1308 if (hw->mac.type == ixgbe_mac_82598EB)
1258 ixgbe_check_fan_failure(adapter, eicr); 1309 ixgbe_check_fan_failure(adapter, eicr);
1259 1310
@@ -1768,6 +1819,8 @@ static inline void ixgbe_irq_enable(struct ixgbe_adapter *adapter)
1768 mask |= IXGBE_EIMS_ECC; 1819 mask |= IXGBE_EIMS_ECC;
1769 mask |= IXGBE_EIMS_GPI_SDP1; 1820 mask |= IXGBE_EIMS_GPI_SDP1;
1770 mask |= IXGBE_EIMS_GPI_SDP2; 1821 mask |= IXGBE_EIMS_GPI_SDP2;
1822 if (adapter->num_vfs)
1823 mask |= IXGBE_EIMS_MAILBOX;
1771 } 1824 }
1772 if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE || 1825 if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE ||
1773 adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE) 1826 adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)
@@ -1776,6 +1829,11 @@ static inline void ixgbe_irq_enable(struct ixgbe_adapter *adapter)
1776 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, mask); 1829 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, mask);
1777 ixgbe_irq_enable_queues(adapter, ~0); 1830 ixgbe_irq_enable_queues(adapter, ~0);
1778 IXGBE_WRITE_FLUSH(&adapter->hw); 1831 IXGBE_WRITE_FLUSH(&adapter->hw);
1832
1833 if (adapter->num_vfs > 32) {
1834 u32 eitrsel = (1 << (adapter->num_vfs - 32)) - 1;
1835 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITRSEL, eitrsel);
1836 }
1779} 1837}
1780 1838
1781/** 1839/**
@@ -1905,6 +1963,8 @@ static inline void ixgbe_irq_disable(struct ixgbe_adapter *adapter)
1905 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFF0000); 1963 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFF0000);
1906 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(0), ~0); 1964 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(0), ~0);
1907 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(1), ~0); 1965 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(1), ~0);
1966 if (adapter->num_vfs > 32)
1967 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITRSEL, 0);
1908 } 1968 }
1909 IXGBE_WRITE_FLUSH(&adapter->hw); 1969 IXGBE_WRITE_FLUSH(&adapter->hw);
1910 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) { 1970 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
@@ -1989,18 +2049,32 @@ static void ixgbe_configure_tx(struct ixgbe_adapter *adapter)
1989 2049
1990 if (hw->mac.type == ixgbe_mac_82599EB) { 2050 if (hw->mac.type == ixgbe_mac_82599EB) {
1991 u32 rttdcs; 2051 u32 rttdcs;
2052 u32 mask;
1992 2053
1993 /* disable the arbiter while setting MTQC */ 2054 /* disable the arbiter while setting MTQC */
1994 rttdcs = IXGBE_READ_REG(hw, IXGBE_RTTDCS); 2055 rttdcs = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
1995 rttdcs |= IXGBE_RTTDCS_ARBDIS; 2056 rttdcs |= IXGBE_RTTDCS_ARBDIS;
1996 IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs); 2057 IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
1997 2058
1998 /* We enable 8 traffic classes, DCB only */ 2059 /* set transmit pool layout */
1999 if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) 2060 mask = (IXGBE_FLAG_SRIOV_ENABLED | IXGBE_FLAG_DCB_ENABLED);
2000 IXGBE_WRITE_REG(hw, IXGBE_MTQC, (IXGBE_MTQC_RT_ENA | 2061 switch (adapter->flags & mask) {
2001 IXGBE_MTQC_8TC_8TQ)); 2062
2002 else 2063 case (IXGBE_FLAG_SRIOV_ENABLED):
2064 IXGBE_WRITE_REG(hw, IXGBE_MTQC,
2065 (IXGBE_MTQC_VT_ENA | IXGBE_MTQC_64VF));
2066 break;
2067
2068 case (IXGBE_FLAG_DCB_ENABLED):
2069 /* We enable 8 traffic classes, DCB only */
2070 IXGBE_WRITE_REG(hw, IXGBE_MTQC,
2071 (IXGBE_MTQC_RT_ENA | IXGBE_MTQC_8TC_8TQ));
2072 break;
2073
2074 default:
2003 IXGBE_WRITE_REG(hw, IXGBE_MTQC, IXGBE_MTQC_64Q_1PB); 2075 IXGBE_WRITE_REG(hw, IXGBE_MTQC, IXGBE_MTQC_64Q_1PB);
2076 break;
2077 }
2004 2078
2005 /* re-eable the arbiter */ 2079 /* re-eable the arbiter */
2006 rttdcs &= ~IXGBE_RTTDCS_ARBDIS; 2080 rttdcs &= ~IXGBE_RTTDCS_ARBDIS;
@@ -2059,12 +2133,16 @@ static u32 ixgbe_setup_mrqc(struct ixgbe_adapter *adapter)
2059#ifdef CONFIG_IXGBE_DCB 2133#ifdef CONFIG_IXGBE_DCB
2060 | IXGBE_FLAG_DCB_ENABLED 2134 | IXGBE_FLAG_DCB_ENABLED
2061#endif 2135#endif
2136 | IXGBE_FLAG_SRIOV_ENABLED
2062 ); 2137 );
2063 2138
2064 switch (mask) { 2139 switch (mask) {
2065 case (IXGBE_FLAG_RSS_ENABLED): 2140 case (IXGBE_FLAG_RSS_ENABLED):
2066 mrqc = IXGBE_MRQC_RSSEN; 2141 mrqc = IXGBE_MRQC_RSSEN;
2067 break; 2142 break;
2143 case (IXGBE_FLAG_SRIOV_ENABLED):
2144 mrqc = IXGBE_MRQC_VMDQEN;
2145 break;
2068#ifdef CONFIG_IXGBE_DCB 2146#ifdef CONFIG_IXGBE_DCB
2069 case (IXGBE_FLAG_DCB_ENABLED): 2147 case (IXGBE_FLAG_DCB_ENABLED):
2070 mrqc = IXGBE_MRQC_RT8TCEN; 2148 mrqc = IXGBE_MRQC_RT8TCEN;
@@ -2145,7 +2223,9 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter)
2145 int rx_buf_len; 2223 int rx_buf_len;
2146 2224
2147 /* Decide whether to use packet split mode or not */ 2225 /* Decide whether to use packet split mode or not */
2148 adapter->flags |= IXGBE_FLAG_RX_PS_ENABLED; 2226 /* Do not use packet split if we're in SR-IOV Mode */
2227 if (!adapter->num_vfs)
2228 adapter->flags |= IXGBE_FLAG_RX_PS_ENABLED;
2149 2229
2150 /* Set the RX buffer length according to the mode */ 2230 /* Set the RX buffer length according to the mode */
2151 if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) { 2231 if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
@@ -2157,7 +2237,9 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter)
2157 IXGBE_PSRTYPE_IPV4HDR | 2237 IXGBE_PSRTYPE_IPV4HDR |
2158 IXGBE_PSRTYPE_IPV6HDR | 2238 IXGBE_PSRTYPE_IPV6HDR |
2159 IXGBE_PSRTYPE_L2HDR; 2239 IXGBE_PSRTYPE_L2HDR;
2160 IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(0), psrtype); 2240 IXGBE_WRITE_REG(hw,
2241 IXGBE_PSRTYPE(adapter->num_vfs),
2242 psrtype);
2161 } 2243 }
2162 } else { 2244 } else {
2163 if (!(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) && 2245 if (!(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) &&
@@ -2243,6 +2325,30 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter)
2243 IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rdrxctl); 2325 IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rdrxctl);
2244 } 2326 }
2245 2327
2328 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
2329 u32 vt_reg_bits;
2330 u32 reg_offset, vf_shift;
2331 u32 vmdctl = IXGBE_READ_REG(hw, IXGBE_VT_CTL);
2332 vt_reg_bits = IXGBE_VMD_CTL_VMDQ_EN
2333 | IXGBE_VT_CTL_REPLEN;
2334 vt_reg_bits |= (adapter->num_vfs <<
2335 IXGBE_VT_CTL_POOL_SHIFT);
2336 IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vmdctl | vt_reg_bits);
2337 IXGBE_WRITE_REG(hw, IXGBE_MRQC, 0);
2338
2339 vf_shift = adapter->num_vfs % 32;
2340 reg_offset = adapter->num_vfs / 32;
2341 IXGBE_WRITE_REG(hw, IXGBE_VFRE(0), 0);
2342 IXGBE_WRITE_REG(hw, IXGBE_VFRE(1), 0);
2343 IXGBE_WRITE_REG(hw, IXGBE_VFTE(0), 0);
2344 IXGBE_WRITE_REG(hw, IXGBE_VFTE(1), 0);
2345 /* Enable only the PF's pool for Tx/Rx */
2346 IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset), (1 << vf_shift));
2347 IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset), (1 << vf_shift));
2348 IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN);
2349 ixgbe_set_vmolr(hw, adapter->num_vfs);
2350 }
2351
2246 /* Program MRQC for the distribution of queues */ 2352 /* Program MRQC for the distribution of queues */
2247 mrqc = ixgbe_setup_mrqc(adapter); 2353 mrqc = ixgbe_setup_mrqc(adapter);
2248 2354
@@ -2274,6 +2380,20 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter)
2274 } 2380 }
2275 IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc); 2381 IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
2276 2382
2383 if (adapter->num_vfs) {
2384 u32 reg;
2385
2386 /* Map PF MAC address in RAR Entry 0 to first pool
2387 * following VFs */
2388 hw->mac.ops.set_vmdq(hw, 0, adapter->num_vfs);
2389
2390 /* Set up VF register offsets for selected VT Mode, i.e.
2391 * 64 VFs for SR-IOV */
2392 reg = IXGBE_READ_REG(hw, IXGBE_GCR_EXT);
2393 reg |= IXGBE_GCR_EXT_SRIOV;
2394 IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, reg);
2395 }
2396
2277 rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM); 2397 rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
2278 2398
2279 if (adapter->flags & IXGBE_FLAG_RSS_ENABLED || 2399 if (adapter->flags & IXGBE_FLAG_RSS_ENABLED ||
@@ -2312,15 +2432,17 @@ static void ixgbe_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
2312{ 2432{
2313 struct ixgbe_adapter *adapter = netdev_priv(netdev); 2433 struct ixgbe_adapter *adapter = netdev_priv(netdev);
2314 struct ixgbe_hw *hw = &adapter->hw; 2434 struct ixgbe_hw *hw = &adapter->hw;
2435 int pool_ndx = adapter->num_vfs;
2315 2436
2316 /* add VID to filter table */ 2437 /* add VID to filter table */
2317 hw->mac.ops.set_vfta(&adapter->hw, vid, 0, true); 2438 hw->mac.ops.set_vfta(&adapter->hw, vid, pool_ndx, true);
2318} 2439}
2319 2440
2320static void ixgbe_vlan_rx_kill_vid(struct net_device *netdev, u16 vid) 2441static void ixgbe_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
2321{ 2442{
2322 struct ixgbe_adapter *adapter = netdev_priv(netdev); 2443 struct ixgbe_adapter *adapter = netdev_priv(netdev);
2323 struct ixgbe_hw *hw = &adapter->hw; 2444 struct ixgbe_hw *hw = &adapter->hw;
2445 int pool_ndx = adapter->num_vfs;
2324 2446
2325 if (!test_bit(__IXGBE_DOWN, &adapter->state)) 2447 if (!test_bit(__IXGBE_DOWN, &adapter->state))
2326 ixgbe_irq_disable(adapter); 2448 ixgbe_irq_disable(adapter);
@@ -2331,7 +2453,7 @@ static void ixgbe_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
2331 ixgbe_irq_enable(adapter); 2453 ixgbe_irq_enable(adapter);
2332 2454
2333 /* remove VID from filter table */ 2455 /* remove VID from filter table */
2334 hw->mac.ops.set_vfta(&adapter->hw, vid, 0, false); 2456 hw->mac.ops.set_vfta(&adapter->hw, vid, pool_ndx, false);
2335} 2457}
2336 2458
2337static void ixgbe_vlan_rx_register(struct net_device *netdev, 2459static void ixgbe_vlan_rx_register(struct net_device *netdev,
@@ -2414,7 +2536,7 @@ static u8 *ixgbe_addr_list_itr(struct ixgbe_hw *hw, u8 **mc_addr_ptr, u32 *vmdq)
2414 * responsible for configuring the hardware for proper unicast, multicast and 2536 * responsible for configuring the hardware for proper unicast, multicast and
2415 * promiscuous mode. 2537 * promiscuous mode.
2416 **/ 2538 **/
2417static void ixgbe_set_rx_mode(struct net_device *netdev) 2539void ixgbe_set_rx_mode(struct net_device *netdev)
2418{ 2540{
2419 struct ixgbe_adapter *adapter = netdev_priv(netdev); 2541 struct ixgbe_adapter *adapter = netdev_priv(netdev);
2420 struct ixgbe_hw *hw = &adapter->hw; 2542 struct ixgbe_hw *hw = &adapter->hw;
@@ -2446,7 +2568,7 @@ static void ixgbe_set_rx_mode(struct net_device *netdev)
2446 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl); 2568 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
2447 2569
2448 /* reprogram secondary unicast list */ 2570 /* reprogram secondary unicast list */
2449 hw->mac.ops.update_uc_addr_list(hw, &netdev->uc.list); 2571 hw->mac.ops.update_uc_addr_list(hw, netdev);
2450 2572
2451 /* reprogram multicast list */ 2573 /* reprogram multicast list */
2452 addr_count = netdev->mc_count; 2574 addr_count = netdev->mc_count;
@@ -2454,6 +2576,8 @@ static void ixgbe_set_rx_mode(struct net_device *netdev)
2454 addr_list = netdev->mc_list->dmi_addr; 2576 addr_list = netdev->mc_list->dmi_addr;
2455 hw->mac.ops.update_mc_addr_list(hw, addr_list, addr_count, 2577 hw->mac.ops.update_mc_addr_list(hw, addr_list, addr_count,
2456 ixgbe_addr_list_itr); 2578 ixgbe_addr_list_itr);
2579 if (adapter->num_vfs)
2580 ixgbe_restore_vf_multicasts(adapter);
2457} 2581}
2458 2582
2459static void ixgbe_napi_enable_all(struct ixgbe_adapter *adapter) 2583static void ixgbe_napi_enable_all(struct ixgbe_adapter *adapter)
@@ -2702,6 +2826,7 @@ static int ixgbe_up_complete(struct ixgbe_adapter *adapter)
2702 u32 txdctl, rxdctl, mhadd; 2826 u32 txdctl, rxdctl, mhadd;
2703 u32 dmatxctl; 2827 u32 dmatxctl;
2704 u32 gpie; 2828 u32 gpie;
2829 u32 ctrl_ext;
2705 2830
2706 ixgbe_get_hw_control(adapter); 2831 ixgbe_get_hw_control(adapter);
2707 2832
@@ -2714,6 +2839,10 @@ static int ixgbe_up_complete(struct ixgbe_adapter *adapter)
2714 /* MSI only */ 2839 /* MSI only */
2715 gpie = 0; 2840 gpie = 0;
2716 } 2841 }
2842 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
2843 gpie &= ~IXGBE_GPIE_VTMODE_MASK;
2844 gpie |= IXGBE_GPIE_VTMODE_64;
2845 }
2717 /* XXX: to interrupt immediately for EICS writes, enable this */ 2846 /* XXX: to interrupt immediately for EICS writes, enable this */
2718 /* gpie |= IXGBE_GPIE_EIMEN; */ 2847 /* gpie |= IXGBE_GPIE_EIMEN; */
2719 IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie); 2848 IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
@@ -2788,6 +2917,18 @@ static int ixgbe_up_complete(struct ixgbe_adapter *adapter)
2788 txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(j)); 2917 txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(j));
2789 txdctl |= IXGBE_TXDCTL_ENABLE; 2918 txdctl |= IXGBE_TXDCTL_ENABLE;
2790 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(j), txdctl); 2919 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(j), txdctl);
2920 if (hw->mac.type == ixgbe_mac_82599EB) {
2921 int wait_loop = 10;
2922 /* poll for Tx Enable ready */
2923 do {
2924 msleep(1);
2925 txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(j));
2926 } while (--wait_loop &&
2927 !(txdctl & IXGBE_TXDCTL_ENABLE));
2928 if (!wait_loop)
2929 DPRINTK(DRV, ERR, "Could not enable "
2930 "Tx Queue %d\n", j);
2931 }
2791 } 2932 }
2792 2933
2793 for (i = 0; i < num_rx_rings; i++) { 2934 for (i = 0; i < num_rx_rings; i++) {
@@ -2875,6 +3016,12 @@ static int ixgbe_up_complete(struct ixgbe_adapter *adapter)
2875 adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE; 3016 adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
2876 adapter->link_check_timeout = jiffies; 3017 adapter->link_check_timeout = jiffies;
2877 mod_timer(&adapter->watchdog_timer, jiffies); 3018 mod_timer(&adapter->watchdog_timer, jiffies);
3019
3020 /* Set PF Reset Done bit so PF/VF Mail Ops can work */
3021 ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
3022 ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD;
3023 IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
3024
2878 return 0; 3025 return 0;
2879} 3026}
2880 3027
@@ -2923,7 +3070,8 @@ void ixgbe_reset(struct ixgbe_adapter *adapter)
2923 } 3070 }
2924 3071
2925 /* reprogram the RAR[0] in case user changed it. */ 3072 /* reprogram the RAR[0] in case user changed it. */
2926 hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV); 3073 hw->mac.ops.set_rar(hw, 0, hw->mac.addr, adapter->num_vfs,
3074 IXGBE_RAH_AV);
2927} 3075}
2928 3076
2929/** 3077/**
@@ -3055,6 +3203,17 @@ void ixgbe_down(struct ixgbe_adapter *adapter)
3055 /* signal that we are down to the interrupt handler */ 3203 /* signal that we are down to the interrupt handler */
3056 set_bit(__IXGBE_DOWN, &adapter->state); 3204 set_bit(__IXGBE_DOWN, &adapter->state);
3057 3205
3206 /* disable receive for all VFs and wait one second */
3207 if (adapter->num_vfs) {
3208 for (i = 0 ; i < adapter->num_vfs; i++)
3209 adapter->vfinfo[i].clear_to_send = 0;
3210
3211 /* ping all the active vfs to let them know we are going down */
3212 ixgbe_ping_all_vfs(adapter);
3213 /* Disable all VFTE/VFRE TX/RX */
3214 ixgbe_disable_tx_rx(adapter);
3215 }
3216
3058 /* disable receives */ 3217 /* disable receives */
3059 rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL); 3218 rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
3060 IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl & ~IXGBE_RXCTRL_RXEN); 3219 IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl & ~IXGBE_RXCTRL_RXEN);
@@ -3291,6 +3450,19 @@ static inline bool ixgbe_set_fcoe_queues(struct ixgbe_adapter *adapter)
3291} 3450}
3292 3451
3293#endif /* IXGBE_FCOE */ 3452#endif /* IXGBE_FCOE */
3453/**
3454 * ixgbe_set_sriov_queues: Allocate queues for IOV use
3455 * @adapter: board private structure to initialize
3456 *
3457 * IOV doesn't actually use anything, so just NAK the
3458 * request for now and let the other queue routines
3459 * figure out what to do.
3460 */
3461static inline bool ixgbe_set_sriov_queues(struct ixgbe_adapter *adapter)
3462{
3463 return false;
3464}
3465
3294/* 3466/*
3295 * ixgbe_set_num_queues: Allocate queues for device, feature dependant 3467 * ixgbe_set_num_queues: Allocate queues for device, feature dependant
3296 * @adapter: board private structure to initialize 3468 * @adapter: board private structure to initialize
@@ -3304,6 +3476,15 @@ static inline bool ixgbe_set_fcoe_queues(struct ixgbe_adapter *adapter)
3304 **/ 3476 **/
3305static void ixgbe_set_num_queues(struct ixgbe_adapter *adapter) 3477static void ixgbe_set_num_queues(struct ixgbe_adapter *adapter)
3306{ 3478{
3479 /* Start with base case */
3480 adapter->num_rx_queues = 1;
3481 adapter->num_tx_queues = 1;
3482 adapter->num_rx_pools = adapter->num_rx_queues;
3483 adapter->num_rx_queues_per_pool = 1;
3484
3485 if (ixgbe_set_sriov_queues(adapter))
3486 return;
3487
3307#ifdef IXGBE_FCOE 3488#ifdef IXGBE_FCOE
3308 if (ixgbe_set_fcoe_queues(adapter)) 3489 if (ixgbe_set_fcoe_queues(adapter))
3309 goto done; 3490 goto done;
@@ -3575,6 +3756,24 @@ static inline bool ixgbe_cache_ring_fcoe(struct ixgbe_adapter *adapter)
3575 3756
3576#endif /* IXGBE_FCOE */ 3757#endif /* IXGBE_FCOE */
3577/** 3758/**
3759 * ixgbe_cache_ring_sriov - Descriptor ring to register mapping for sriov
3760 * @adapter: board private structure to initialize
3761 *
3762 * SR-IOV doesn't use any descriptor rings but changes the default if
3763 * no other mapping is used.
3764 *
3765 */
3766static inline bool ixgbe_cache_ring_sriov(struct ixgbe_adapter *adapter)
3767{
3768 adapter->rx_ring[0].reg_idx = adapter->num_vfs * 2;
3769 adapter->tx_ring[0].reg_idx = adapter->num_vfs * 2;
3770 if (adapter->num_vfs)
3771 return true;
3772 else
3773 return false;
3774}
3775
3776/**
3578 * ixgbe_cache_ring_register - Descriptor ring to register mapping 3777 * ixgbe_cache_ring_register - Descriptor ring to register mapping
3579 * @adapter: board private structure to initialize 3778 * @adapter: board private structure to initialize
3580 * 3779 *
@@ -3591,6 +3790,9 @@ static void ixgbe_cache_ring_register(struct ixgbe_adapter *adapter)
3591 adapter->rx_ring[0].reg_idx = 0; 3790 adapter->rx_ring[0].reg_idx = 0;
3592 adapter->tx_ring[0].reg_idx = 0; 3791 adapter->tx_ring[0].reg_idx = 0;
3593 3792
3793 if (ixgbe_cache_ring_sriov(adapter))
3794 return;
3795
3594#ifdef IXGBE_FCOE 3796#ifdef IXGBE_FCOE
3595 if (ixgbe_cache_ring_fcoe(adapter)) 3797 if (ixgbe_cache_ring_fcoe(adapter))
3596 return; 3798 return;
@@ -3700,6 +3902,9 @@ static int ixgbe_set_interrupt_capability(struct ixgbe_adapter *adapter)
3700 adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE; 3902 adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;
3701 adapter->flags &= ~IXGBE_FLAG_FDIR_PERFECT_CAPABLE; 3903 adapter->flags &= ~IXGBE_FLAG_FDIR_PERFECT_CAPABLE;
3702 adapter->atr_sample_rate = 0; 3904 adapter->atr_sample_rate = 0;
3905 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
3906 ixgbe_disable_sriov(adapter);
3907
3703 ixgbe_set_num_queues(adapter); 3908 ixgbe_set_num_queues(adapter);
3704 3909
3705 err = pci_enable_msi(adapter->pdev); 3910 err = pci_enable_msi(adapter->pdev);
@@ -5487,7 +5692,8 @@ static int ixgbe_set_mac(struct net_device *netdev, void *p)
5487 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); 5692 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
5488 memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len); 5693 memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);
5489 5694
5490 hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV); 5695 hw->mac.ops.set_rar(hw, 0, hw->mac.addr, adapter->num_vfs,
5696 IXGBE_RAH_AV);
5491 5697
5492 return 0; 5698 return 0;
5493} 5699}
@@ -5624,6 +5830,61 @@ static const struct net_device_ops ixgbe_netdev_ops = {
5624#endif /* IXGBE_FCOE */ 5830#endif /* IXGBE_FCOE */
5625}; 5831};
5626 5832
5833static void __devinit ixgbe_probe_vf(struct ixgbe_adapter *adapter,
5834 const struct ixgbe_info *ii)
5835{
5836#ifdef CONFIG_PCI_IOV
5837 struct ixgbe_hw *hw = &adapter->hw;
5838 int err;
5839
5840 if (hw->mac.type != ixgbe_mac_82599EB || !max_vfs)
5841 return;
5842
5843 /* The 82599 supports up to 64 VFs per physical function
5844 * but this implementation limits allocation to 63 so that
5845 * basic networking resources are still available to the
5846 * physical function
5847 */
5848 adapter->num_vfs = (max_vfs > 63) ? 63 : max_vfs;
5849 adapter->flags |= IXGBE_FLAG_SRIOV_ENABLED;
5850 err = pci_enable_sriov(adapter->pdev, adapter->num_vfs);
5851 if (err) {
5852 DPRINTK(PROBE, ERR,
5853 "Failed to enable PCI sriov: %d\n", err);
5854 goto err_novfs;
5855 }
5856 /* If call to enable VFs succeeded then allocate memory
5857 * for per VF control structures.
5858 */
5859 adapter->vfinfo =
5860 kcalloc(adapter->num_vfs,
5861 sizeof(struct vf_data_storage), GFP_KERNEL);
5862 if (adapter->vfinfo) {
5863 /* Now that we're sure SR-IOV is enabled
5864 * and memory allocated set up the mailbox parameters
5865 */
5866 ixgbe_init_mbx_params_pf(hw);
5867 memcpy(&hw->mbx.ops, ii->mbx_ops,
5868 sizeof(hw->mbx.ops));
5869
5870 /* Disable RSC when in SR-IOV mode */
5871 adapter->flags2 &= ~(IXGBE_FLAG2_RSC_CAPABLE |
5872 IXGBE_FLAG2_RSC_ENABLED);
5873 return;
5874 }
5875
5876 /* Oh oh */
5877 DPRINTK(PROBE, ERR,
5878 "Unable to allocate memory for VF "
5879 "Data Storage - SRIOV disabled\n");
5880 pci_disable_sriov(adapter->pdev);
5881
5882err_novfs:
5883 adapter->flags &= ~IXGBE_FLAG_SRIOV_ENABLED;
5884 adapter->num_vfs = 0;
5885#endif /* CONFIG_PCI_IOV */
5886}
5887
5627/** 5888/**
5628 * ixgbe_probe - Device Initialization Routine 5889 * ixgbe_probe - Device Initialization Routine
5629 * @pdev: PCI device information struct 5890 * @pdev: PCI device information struct
@@ -5798,6 +6059,8 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
5798 goto err_sw_init; 6059 goto err_sw_init;
5799 } 6060 }
5800 6061
6062 ixgbe_probe_vf(adapter, ii);
6063
5801 netdev->features = NETIF_F_SG | 6064 netdev->features = NETIF_F_SG |
5802 NETIF_F_IP_CSUM | 6065 NETIF_F_IP_CSUM |
5803 NETIF_F_HW_VLAN_TX | 6066 NETIF_F_HW_VLAN_TX |
@@ -5818,6 +6081,9 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
5818 netdev->vlan_features |= NETIF_F_IPV6_CSUM; 6081 netdev->vlan_features |= NETIF_F_IPV6_CSUM;
5819 netdev->vlan_features |= NETIF_F_SG; 6082 netdev->vlan_features |= NETIF_F_SG;
5820 6083
6084 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
6085 adapter->flags &= ~(IXGBE_FLAG_RSS_ENABLED |
6086 IXGBE_FLAG_DCB_ENABLED);
5821 if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) 6087 if (adapter->flags & IXGBE_FLAG_DCB_ENABLED)
5822 adapter->flags &= ~IXGBE_FLAG_RSS_ENABLED; 6088 adapter->flags &= ~IXGBE_FLAG_RSS_ENABLED;
5823 6089
@@ -5944,6 +6210,13 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
5944 ixgbe_setup_dca(adapter); 6210 ixgbe_setup_dca(adapter);
5945 } 6211 }
5946#endif 6212#endif
6213 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
6214 DPRINTK(PROBE, INFO, "IOV is enabled with %d VFs\n",
6215 adapter->num_vfs);
6216 for (i = 0; i < adapter->num_vfs; i++)
6217 ixgbe_vf_configuration(pdev, (i | 0x10000000));
6218 }
6219
5947 /* add san mac addr to netdev */ 6220 /* add san mac addr to netdev */
5948 ixgbe_add_sanmac_netdev(netdev); 6221 ixgbe_add_sanmac_netdev(netdev);
5949 6222
@@ -5956,6 +6229,8 @@ err_register:
5956 ixgbe_clear_interrupt_scheme(adapter); 6229 ixgbe_clear_interrupt_scheme(adapter);
5957err_sw_init: 6230err_sw_init:
5958err_eeprom: 6231err_eeprom:
6232 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
6233 ixgbe_disable_sriov(adapter);
5959 clear_bit(__IXGBE_SFP_MODULE_NOT_FOUND, &adapter->state); 6234 clear_bit(__IXGBE_SFP_MODULE_NOT_FOUND, &adapter->state);
5960 del_timer_sync(&adapter->sfp_timer); 6235 del_timer_sync(&adapter->sfp_timer);
5961 cancel_work_sync(&adapter->sfp_task); 6236 cancel_work_sync(&adapter->sfp_task);
@@ -6024,6 +6299,9 @@ static void __devexit ixgbe_remove(struct pci_dev *pdev)
6024 if (netdev->reg_state == NETREG_REGISTERED) 6299 if (netdev->reg_state == NETREG_REGISTERED)
6025 unregister_netdev(netdev); 6300 unregister_netdev(netdev);
6026 6301
6302 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
6303 ixgbe_disable_sriov(adapter);
6304
6027 ixgbe_clear_interrupt_scheme(adapter); 6305 ixgbe_clear_interrupt_scheme(adapter);
6028 6306
6029 ixgbe_release_hw_control(adapter); 6307 ixgbe_release_hw_control(adapter);
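A reading aid for the probe hunk above: the (i | 0x10000000) argument passed to ixgbe_vf_configuration() packs the VF index into the low 6 bits and an "enable" flag into bit 28, which is exactly how the new ixgbe_sriov.c (later in this patch) unpacks it. A minimal sketch, with illustrative variable names:

    /* sketch of the event_mask layout consumed by ixgbe_vf_configuration() */
    unsigned int event_mask = vf_index | 0x10000000;        /* vf_index is illustrative */
    unsigned int vfn    = event_mask & 0x3f;                 /* VF number, low 6 bits */
    bool         enable = (event_mask & 0x10000000U) != 0;   /* bit 28: VF being enabled */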
diff --git a/drivers/net/ixgbe/ixgbe_mbx.c b/drivers/net/ixgbe/ixgbe_mbx.c
new file mode 100644
index 00000000000..d75f9148eb1
--- /dev/null
+++ b/drivers/net/ixgbe/ixgbe_mbx.c
@@ -0,0 +1,479 @@
1/*******************************************************************************
2
3 Intel 10 Gigabit PCI Express Linux driver
4 Copyright(c) 1999 - 2009 Intel Corporation.
5
6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License,
8 version 2, as published by the Free Software Foundation.
9
10 This program is distributed in the hope it will be useful, but WITHOUT
11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 more details.
14
15 You should have received a copy of the GNU General Public License along with
16 this program; if not, write to the Free Software Foundation, Inc.,
17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18
19 The full GNU General Public License is included in this distribution in
20 the file called "COPYING".
21
22 Contact Information:
23 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
24 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
25
26*******************************************************************************/
27
28#include <linux/pci.h>
29#include <linux/delay.h>
30#include "ixgbe_type.h"
31#include "ixgbe_common.h"
32#include "ixgbe_mbx.h"
33
34/**
35 * ixgbe_read_mbx - Reads a message from the mailbox
36 * @hw: pointer to the HW structure
37 * @msg: The message buffer
38 * @size: Length of buffer
39 * @mbx_id: id of mailbox to read
40 *
 41 * returns SUCCESS if it successfully read message from buffer
42 **/
43s32 ixgbe_read_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size, u16 mbx_id)
44{
45 struct ixgbe_mbx_info *mbx = &hw->mbx;
46 s32 ret_val = IXGBE_ERR_MBX;
47
48 /* limit read to size of mailbox */
49 if (size > mbx->size)
50 size = mbx->size;
51
52 if (mbx->ops.read)
53 ret_val = mbx->ops.read(hw, msg, size, mbx_id);
54
55 return ret_val;
56}
57
58/**
59 * ixgbe_write_mbx - Write a message to the mailbox
60 * @hw: pointer to the HW structure
61 * @msg: The message buffer
62 * @size: Length of buffer
63 * @mbx_id: id of mailbox to write
64 *
65 * returns SUCCESS if it successfully copied message into the buffer
66 **/
67s32 ixgbe_write_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size, u16 mbx_id)
68{
69 struct ixgbe_mbx_info *mbx = &hw->mbx;
70 s32 ret_val = 0;
71
72 if (size > mbx->size)
73 ret_val = IXGBE_ERR_MBX;
74
75 else if (mbx->ops.write)
76 ret_val = mbx->ops.write(hw, msg, size, mbx_id);
77
78 return ret_val;
79}
80
81/**
82 * ixgbe_check_for_msg - checks to see if someone sent us mail
83 * @hw: pointer to the HW structure
84 * @mbx_id: id of mailbox to check
85 *
86 * returns SUCCESS if the Status bit was found or else ERR_MBX
87 **/
88s32 ixgbe_check_for_msg(struct ixgbe_hw *hw, u16 mbx_id)
89{
90 struct ixgbe_mbx_info *mbx = &hw->mbx;
91 s32 ret_val = IXGBE_ERR_MBX;
92
93 if (mbx->ops.check_for_msg)
94 ret_val = mbx->ops.check_for_msg(hw, mbx_id);
95
96 return ret_val;
97}
98
99/**
100 * ixgbe_check_for_ack - checks to see if someone sent us ACK
101 * @hw: pointer to the HW structure
102 * @mbx_id: id of mailbox to check
103 *
104 * returns SUCCESS if the Status bit was found or else ERR_MBX
105 **/
106s32 ixgbe_check_for_ack(struct ixgbe_hw *hw, u16 mbx_id)
107{
108 struct ixgbe_mbx_info *mbx = &hw->mbx;
109 s32 ret_val = IXGBE_ERR_MBX;
110
111 if (mbx->ops.check_for_ack)
112 ret_val = mbx->ops.check_for_ack(hw, mbx_id);
113
114 return ret_val;
115}
116
117/**
118 * ixgbe_check_for_rst - checks to see if other side has reset
119 * @hw: pointer to the HW structure
120 * @mbx_id: id of mailbox to check
121 *
122 * returns SUCCESS if the Status bit was found or else ERR_MBX
123 **/
124s32 ixgbe_check_for_rst(struct ixgbe_hw *hw, u16 mbx_id)
125{
126 struct ixgbe_mbx_info *mbx = &hw->mbx;
127 s32 ret_val = IXGBE_ERR_MBX;
128
129 if (mbx->ops.check_for_rst)
130 ret_val = mbx->ops.check_for_rst(hw, mbx_id);
131
132 return ret_val;
133}
134
135/**
136 * ixgbe_poll_for_msg - Wait for message notification
137 * @hw: pointer to the HW structure
138 * @mbx_id: id of mailbox to write
139 *
140 * returns SUCCESS if it successfully received a message notification
141 **/
142static s32 ixgbe_poll_for_msg(struct ixgbe_hw *hw, u16 mbx_id)
143{
144 struct ixgbe_mbx_info *mbx = &hw->mbx;
145 int countdown = mbx->timeout;
146
147 if (!countdown || !mbx->ops.check_for_msg)
148 goto out;
149
150 while (countdown && mbx->ops.check_for_msg(hw, mbx_id)) {
151 countdown--;
152 if (!countdown)
153 break;
154 udelay(mbx->usec_delay);
155 }
156
157 /* if we failed, all future posted messages fail until reset */
158 if (!countdown)
159 mbx->timeout = 0;
160out:
161 return countdown ? 0 : IXGBE_ERR_MBX;
162}
163
164/**
165 * ixgbe_poll_for_ack - Wait for message acknowledgement
166 * @hw: pointer to the HW structure
167 * @mbx_id: id of mailbox to write
168 *
169 * returns SUCCESS if it successfully received a message acknowledgement
170 **/
171static s32 ixgbe_poll_for_ack(struct ixgbe_hw *hw, u16 mbx_id)
172{
173 struct ixgbe_mbx_info *mbx = &hw->mbx;
174 int countdown = mbx->timeout;
175
176 if (!countdown || !mbx->ops.check_for_ack)
177 goto out;
178
179 while (countdown && mbx->ops.check_for_ack(hw, mbx_id)) {
180 countdown--;
181 if (!countdown)
182 break;
183 udelay(mbx->usec_delay);
184 }
185
186 /* if we failed, all future posted messages fail until reset */
187 if (!countdown)
188 mbx->timeout = 0;
189out:
190 return countdown ? 0 : IXGBE_ERR_MBX;
191}
192
193/**
194 * ixgbe_read_posted_mbx - Wait for message notification and receive message
195 * @hw: pointer to the HW structure
196 * @msg: The message buffer
197 * @size: Length of buffer
198 * @mbx_id: id of mailbox to write
199 *
200 * returns SUCCESS if it successfully received a message notification and
201 * copied it into the receive buffer.
202 **/
203s32 ixgbe_read_posted_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size, u16 mbx_id)
204{
205 struct ixgbe_mbx_info *mbx = &hw->mbx;
206 s32 ret_val = IXGBE_ERR_MBX;
207
208 if (!mbx->ops.read)
209 goto out;
210
211 ret_val = ixgbe_poll_for_msg(hw, mbx_id);
212
213 /* if ack received read message, otherwise we timed out */
214 if (!ret_val)
215 ret_val = mbx->ops.read(hw, msg, size, mbx_id);
216out:
217 return ret_val;
218}
219
220/**
221 * ixgbe_write_posted_mbx - Write a message to the mailbox, wait for ack
222 * @hw: pointer to the HW structure
223 * @msg: The message buffer
224 * @size: Length of buffer
225 * @mbx_id: id of mailbox to write
226 *
227 * returns SUCCESS if it successfully copied message into the buffer and
228 * received an ack to that message within delay * timeout period
229 **/
230s32 ixgbe_write_posted_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size,
231 u16 mbx_id)
232{
233 struct ixgbe_mbx_info *mbx = &hw->mbx;
234 s32 ret_val = IXGBE_ERR_MBX;
235
236 /* exit if either we can't write or there isn't a defined timeout */
237 if (!mbx->ops.write || !mbx->timeout)
238 goto out;
239
240 /* send msg */
241 ret_val = mbx->ops.write(hw, msg, size, mbx_id);
242
243 /* if msg sent wait until we receive an ack */
244 if (!ret_val)
245 ret_val = ixgbe_poll_for_ack(hw, mbx_id);
246out:
247 return ret_val;
248}
249
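For orientation, a caller-side sketch of the posted-mailbox pattern (illustrative only: hw, msg and vf are assumed, and the posted variants bail out unless mbx->timeout and mbx->usec_delay are non-zero; with the IXGBE_VF_MBX_INIT_TIMEOUT/IXGBE_VF_MBX_INIT_DELAY values from ixgbe_mbx.h below, the wait is about 2000 * 500 us, roughly one second):

    u32 msg[IXGBE_VFMAILBOX_SIZE] = { IXGBE_PF_CONTROL_MSG };
    s32 err;

    /* write the message, then poll up to timeout * usec_delay for the ACK */
    err = ixgbe_write_posted_mbx(hw, msg, 1, vf);
    if (err == IXGBE_ERR_MBX) {
        /* no ACK in time: ixgbe_poll_for_ack() has zeroed mbx->timeout,
         * so all further posted writes fail until the mailbox is reset */
    }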
250/**
251 * ixgbe_init_mbx_ops_generic - Initialize MB function pointers
252 * @hw: pointer to the HW structure
253 *
254 * Setup the mailbox read and write message function pointers
255 **/
256void ixgbe_init_mbx_ops_generic(struct ixgbe_hw *hw)
257{
258 struct ixgbe_mbx_info *mbx = &hw->mbx;
259
260 mbx->ops.read_posted = ixgbe_read_posted_mbx;
261 mbx->ops.write_posted = ixgbe_write_posted_mbx;
262}
263
264static s32 ixgbe_check_for_bit_pf(struct ixgbe_hw *hw, u32 mask, s32 index)
265{
266 u32 mbvficr = IXGBE_READ_REG(hw, IXGBE_MBVFICR(index));
267 s32 ret_val = IXGBE_ERR_MBX;
268
269 if (mbvficr & mask) {
270 ret_val = 0;
271 IXGBE_WRITE_REG(hw, IXGBE_MBVFICR(index), mask);
272 }
273
274 return ret_val;
275}
276
277/**
278 * ixgbe_check_for_msg_pf - checks to see if the VF has sent mail
279 * @hw: pointer to the HW structure
280 * @vf_number: the VF index
281 *
282 * returns SUCCESS if the VF has set the Status bit or else ERR_MBX
283 **/
284static s32 ixgbe_check_for_msg_pf(struct ixgbe_hw *hw, u16 vf_number)
285{
286 s32 ret_val = IXGBE_ERR_MBX;
287 s32 index = IXGBE_MBVFICR_INDEX(vf_number);
288 u32 vf_bit = vf_number % 16;
289
290 if (!ixgbe_check_for_bit_pf(hw, IXGBE_MBVFICR_VFREQ_VF1 << vf_bit,
291 index)) {
292 ret_val = 0;
293 hw->mbx.stats.reqs++;
294 }
295
296 return ret_val;
297}
298
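To make the index arithmetic concrete: for VF 21, IXGBE_MBVFICR_INDEX(21) = 21 >> 4 = 1 and vf_bit = 21 % 16 = 5, so the request bit tested (and cleared by ixgbe_check_for_bit_pf()) is IXGBE_MBVFICR_VFREQ_VF1 << 5 = 0x00000020 in MBVFICR(1); the matching ack bit used by ixgbe_check_for_ack_pf() below is IXGBE_MBVFICR_VFACK_VF1 << 5 = 0x00200000.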
299/**
300 * ixgbe_check_for_ack_pf - checks to see if the VF has ACKed
301 * @hw: pointer to the HW structure
302 * @vf_number: the VF index
303 *
 304 * returns SUCCESS if the VF has set the ACK bit or else ERR_MBX
305 **/
306static s32 ixgbe_check_for_ack_pf(struct ixgbe_hw *hw, u16 vf_number)
307{
308 s32 ret_val = IXGBE_ERR_MBX;
309 s32 index = IXGBE_MBVFICR_INDEX(vf_number);
310 u32 vf_bit = vf_number % 16;
311
312 if (!ixgbe_check_for_bit_pf(hw, IXGBE_MBVFICR_VFACK_VF1 << vf_bit,
313 index)) {
314 ret_val = 0;
315 hw->mbx.stats.acks++;
316 }
317
318 return ret_val;
319}
320
321/**
322 * ixgbe_check_for_rst_pf - checks to see if the VF has reset
323 * @hw: pointer to the HW structure
324 * @vf_number: the VF index
325 *
 326 * returns SUCCESS if the VF has been reset (VFLRE bit set) or else ERR_MBX
327 **/
328static s32 ixgbe_check_for_rst_pf(struct ixgbe_hw *hw, u16 vf_number)
329{
330 u32 reg_offset = (vf_number < 32) ? 0 : 1;
331 u32 vf_shift = vf_number % 32;
332 u32 vflre = 0;
333 s32 ret_val = IXGBE_ERR_MBX;
334
335 if (hw->mac.type == ixgbe_mac_82599EB)
336 vflre = IXGBE_READ_REG(hw, IXGBE_VFLRE(reg_offset));
337
338 if (vflre & (1 << vf_shift)) {
339 ret_val = 0;
340 IXGBE_WRITE_REG(hw, IXGBE_VFLREC(reg_offset), (1 << vf_shift));
341 hw->mbx.stats.rsts++;
342 }
343
344 return ret_val;
345}
346
347/**
348 * ixgbe_obtain_mbx_lock_pf - obtain mailbox lock
349 * @hw: pointer to the HW structure
350 * @vf_number: the VF index
351 *
352 * return SUCCESS if we obtained the mailbox lock
353 **/
354static s32 ixgbe_obtain_mbx_lock_pf(struct ixgbe_hw *hw, u16 vf_number)
355{
356 s32 ret_val = IXGBE_ERR_MBX;
357 u32 p2v_mailbox;
358
359 /* Take ownership of the buffer */
360 IXGBE_WRITE_REG(hw, IXGBE_PFMAILBOX(vf_number), IXGBE_PFMAILBOX_PFU);
361
362 /* reserve mailbox for vf use */
363 p2v_mailbox = IXGBE_READ_REG(hw, IXGBE_PFMAILBOX(vf_number));
364 if (p2v_mailbox & IXGBE_PFMAILBOX_PFU)
365 ret_val = 0;
366
367 return ret_val;
368}
369
370/**
371 * ixgbe_write_mbx_pf - Places a message in the mailbox
372 * @hw: pointer to the HW structure
373 * @msg: The message buffer
374 * @size: Length of buffer
375 * @vf_number: the VF index
376 *
377 * returns SUCCESS if it successfully copied message into the buffer
378 **/
379static s32 ixgbe_write_mbx_pf(struct ixgbe_hw *hw, u32 *msg, u16 size,
380 u16 vf_number)
381{
382 s32 ret_val;
383 u16 i;
384
385 /* lock the mailbox to prevent pf/vf race condition */
386 ret_val = ixgbe_obtain_mbx_lock_pf(hw, vf_number);
387 if (ret_val)
388 goto out_no_write;
389
390 /* flush msg and acks as we are overwriting the message buffer */
391 ixgbe_check_for_msg_pf(hw, vf_number);
392 ixgbe_check_for_ack_pf(hw, vf_number);
393
394 /* copy the caller specified message to the mailbox memory buffer */
395 for (i = 0; i < size; i++)
396 IXGBE_WRITE_REG_ARRAY(hw, IXGBE_PFMBMEM(vf_number), i, msg[i]);
397
398 /* Interrupt VF to tell it a message has been sent and release buffer*/
399 IXGBE_WRITE_REG(hw, IXGBE_PFMAILBOX(vf_number), IXGBE_PFMAILBOX_STS);
400
401 /* update stats */
402 hw->mbx.stats.msgs_tx++;
403
404out_no_write:
405 return ret_val;
406
407}
408
409/**
410 * ixgbe_read_mbx_pf - Read a message from the mailbox
411 * @hw: pointer to the HW structure
412 * @msg: The message buffer
413 * @size: Length of buffer
414 * @vf_number: the VF index
415 *
416 * This function copies a message from the mailbox buffer to the caller's
417 * memory buffer. The presumption is that the caller knows that there was
418 * a message due to a VF request so no polling for message is needed.
419 **/
420static s32 ixgbe_read_mbx_pf(struct ixgbe_hw *hw, u32 *msg, u16 size,
421 u16 vf_number)
422{
423 s32 ret_val;
424 u16 i;
425
426 /* lock the mailbox to prevent pf/vf race condition */
427 ret_val = ixgbe_obtain_mbx_lock_pf(hw, vf_number);
428 if (ret_val)
429 goto out_no_read;
430
431 /* copy the message to the mailbox memory buffer */
432 for (i = 0; i < size; i++)
433 msg[i] = IXGBE_READ_REG_ARRAY(hw, IXGBE_PFMBMEM(vf_number), i);
434
435 /* Acknowledge the message and release buffer */
436 IXGBE_WRITE_REG(hw, IXGBE_PFMAILBOX(vf_number), IXGBE_PFMAILBOX_ACK);
437
438 /* update stats */
439 hw->mbx.stats.msgs_rx++;
440
441out_no_read:
442 return ret_val;
443}
444
445/**
446 * ixgbe_init_mbx_params_pf - set initial values for pf mailbox
447 * @hw: pointer to the HW structure
448 *
449 * Initializes the hw->mbx struct to correct values for pf mailbox
450 */
451void ixgbe_init_mbx_params_pf(struct ixgbe_hw *hw)
452{
453 struct ixgbe_mbx_info *mbx = &hw->mbx;
454
455 if (hw->mac.type != ixgbe_mac_82599EB)
456 return;
457
458 mbx->timeout = 0;
459 mbx->usec_delay = 0;
460
461 mbx->size = IXGBE_VFMAILBOX_SIZE;
462
463 mbx->stats.msgs_tx = 0;
464 mbx->stats.msgs_rx = 0;
465 mbx->stats.reqs = 0;
466 mbx->stats.acks = 0;
467 mbx->stats.rsts = 0;
468}
469
470struct ixgbe_mbx_operations mbx_ops_82599 = {
471 .read = ixgbe_read_mbx_pf,
472 .write = ixgbe_write_mbx_pf,
473 .read_posted = ixgbe_read_posted_mbx,
474 .write_posted = ixgbe_write_posted_mbx,
475 .check_for_msg = ixgbe_check_for_msg_pf,
476 .check_for_ack = ixgbe_check_for_ack_pf,
477 .check_for_rst = ixgbe_check_for_rst_pf,
478};
479
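For context: this ops table is what ixgbe_probe_vf() in ixgbe_main.c copies into hw->mbx.ops, presumably via ii->mbx_ops (the 82599 info-struct wiring is not part of this hunk). Roughly, as a sketch rather than a literal excerpt:

    ixgbe_init_mbx_params_pf(hw);      /* size = 16 words, timeout/usec_delay = 0 on the PF side */
    memcpy(&hw->mbx.ops, &mbx_ops_82599, sizeof(hw->mbx.ops));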
diff --git a/drivers/net/ixgbe/ixgbe_mbx.h b/drivers/net/ixgbe/ixgbe_mbx.h
new file mode 100644
index 00000000000..be7ab3309ab
--- /dev/null
+++ b/drivers/net/ixgbe/ixgbe_mbx.h
@@ -0,0 +1,96 @@
1/*******************************************************************************
2
3 Intel 10 Gigabit PCI Express Linux driver
4 Copyright(c) 1999 - 2009 Intel Corporation.
5
6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License,
8 version 2, as published by the Free Software Foundation.
9
10 This program is distributed in the hope it will be useful, but WITHOUT
11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 more details.
14
15 You should have received a copy of the GNU General Public License along with
16 this program; if not, write to the Free Software Foundation, Inc.,
17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18
19 The full GNU General Public License is included in this distribution in
20 the file called "COPYING".
21
22 Contact Information:
23 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
24 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
25
26*******************************************************************************/
27
28#ifndef _IXGBE_MBX_H_
29#define _IXGBE_MBX_H_
30
31#include "ixgbe_type.h"
32
33#define IXGBE_VFMAILBOX_SIZE 16 /* 16 32 bit words - 64 bytes */
34#define IXGBE_ERR_MBX -100
35
36#define IXGBE_VFMAILBOX 0x002FC
37#define IXGBE_VFMBMEM 0x00200
38
39#define IXGBE_PFMAILBOX(x) (0x04B00 + (4 * x))
40#define IXGBE_PFMBMEM(vfn) (0x13000 + (64 * vfn))
41
42#define IXGBE_PFMAILBOX_STS 0x00000001 /* Initiate message send to VF */
43#define IXGBE_PFMAILBOX_ACK 0x00000002 /* Ack message recv'd from VF */
44#define IXGBE_PFMAILBOX_VFU 0x00000004 /* VF owns the mailbox buffer */
45#define IXGBE_PFMAILBOX_PFU 0x00000008 /* PF owns the mailbox buffer */
46#define IXGBE_PFMAILBOX_RVFU 0x00000010 /* Reset VFU - used when VF stuck */
47
48#define IXGBE_MBVFICR_VFREQ_MASK 0x0000FFFF /* bits for VF messages */
49#define IXGBE_MBVFICR_VFREQ_VF1 0x00000001 /* bit for VF 1 message */
50#define IXGBE_MBVFICR_VFACK_MASK 0xFFFF0000 /* bits for VF acks */
51#define IXGBE_MBVFICR_VFACK_VF1 0x00010000 /* bit for VF 1 ack */
52
53
 54/* If it's an IXGBE_VF_* msg then it originates in the VF and is sent to the
 55 * PF. The reverse is true if it is IXGBE_PF_*.
 56 * Message replies are the original message value or'd with the IXGBE_VT_MSGTYPE_* bits below
57 */
58#define IXGBE_VT_MSGTYPE_ACK 0x80000000 /* Messages below or'd with
59 * this are the ACK */
60#define IXGBE_VT_MSGTYPE_NACK 0x40000000 /* Messages below or'd with
61 * this are the NACK */
62#define IXGBE_VT_MSGTYPE_CTS 0x20000000 /* Indicates that VF is still
63 clear to send requests */
64#define IXGBE_VT_MSGINFO_SHIFT 16
 65/* bits 23:16 are used for extra info for certain messages */
66#define IXGBE_VT_MSGINFO_MASK (0xFF << IXGBE_VT_MSGINFO_SHIFT)
67
68#define IXGBE_VF_RESET 0x01 /* VF requests reset */
69#define IXGBE_VF_SET_MAC_ADDR 0x02 /* VF requests PF to set MAC addr */
70#define IXGBE_VF_SET_MULTICAST 0x03 /* VF requests PF to set MC addr */
71#define IXGBE_VF_SET_VLAN 0x04 /* VF requests PF to set VLAN */
72#define IXGBE_VF_SET_LPE 0x05 /* VF requests PF to set VMOLR.LPE */
73
74/* length of permanent address message returned from PF */
75#define IXGBE_VF_PERMADDR_MSG_LEN 4
76/* word in permanent address message with the current multicast type */
77#define IXGBE_VF_MC_TYPE_WORD 3
78
79#define IXGBE_PF_CONTROL_MSG 0x0100 /* PF control message */
80
81#define IXGBE_VF_MBX_INIT_TIMEOUT 2000 /* number of retries on mailbox */
82#define IXGBE_VF_MBX_INIT_DELAY 500 /* microseconds between retries */
83
84s32 ixgbe_read_mbx(struct ixgbe_hw *, u32 *, u16, u16);
85s32 ixgbe_write_mbx(struct ixgbe_hw *, u32 *, u16, u16);
86s32 ixgbe_read_posted_mbx(struct ixgbe_hw *, u32 *, u16, u16);
87s32 ixgbe_write_posted_mbx(struct ixgbe_hw *, u32 *, u16, u16);
88s32 ixgbe_check_for_msg(struct ixgbe_hw *, u16);
89s32 ixgbe_check_for_ack(struct ixgbe_hw *, u16);
90s32 ixgbe_check_for_rst(struct ixgbe_hw *, u16);
91void ixgbe_init_mbx_ops_generic(struct ixgbe_hw *hw);
92void ixgbe_init_mbx_params_pf(struct ixgbe_hw *);
93
94extern struct ixgbe_mbx_operations mbx_ops_82599;
95
96#endif /* _IXGBE_MBX_H_ */
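As an illustration of the message layout (a sketch with assumed values; the PF-side decode lives in ixgbe_rcv_msg_from_vf() in ixgbe_sriov.c below): a VF asking the PF to add VLAN id 100 would post

    u32 msgbuf[2];

    msgbuf[0] = IXGBE_VF_SET_VLAN | (1 << IXGBE_VT_MSGINFO_SHIFT);  /* info bits 23:16 carry "add" */
    msgbuf[1] = 100;                                                /* VLAN id, low 12 bits are used */

and would get back msgbuf[0] or'd with IXGBE_VT_MSGTYPE_ACK (or NACK) plus IXGBE_VT_MSGTYPE_CTS.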
diff --git a/drivers/net/ixgbe/ixgbe_sriov.c b/drivers/net/ixgbe/ixgbe_sriov.c
new file mode 100644
index 00000000000..d4cd20f3019
--- /dev/null
+++ b/drivers/net/ixgbe/ixgbe_sriov.c
@@ -0,0 +1,362 @@
1/*******************************************************************************
2
3 Intel 10 Gigabit PCI Express Linux driver
4 Copyright(c) 1999 - 2009 Intel Corporation.
5
6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License,
8 version 2, as published by the Free Software Foundation.
9
10 This program is distributed in the hope it will be useful, but WITHOUT
11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 more details.
14
15 You should have received a copy of the GNU General Public License along with
16 this program; if not, write to the Free Software Foundation, Inc.,
17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18
19 The full GNU General Public License is included in this distribution in
20 the file called "COPYING".
21
22 Contact Information:
23 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
24 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
25
26*******************************************************************************/
27
28
29#include <linux/types.h>
30#include <linux/module.h>
31#include <linux/pci.h>
32#include <linux/netdevice.h>
33#include <linux/vmalloc.h>
34#include <linux/string.h>
35#include <linux/in.h>
36#include <linux/ip.h>
37#include <linux/tcp.h>
38#include <linux/ipv6.h>
39#ifdef NETIF_F_HW_VLAN_TX
40#include <linux/if_vlan.h>
41#endif
42
43#include "ixgbe.h"
44
45#include "ixgbe_sriov.h"
46
47int ixgbe_set_vf_multicasts(struct ixgbe_adapter *adapter,
48 int entries, u16 *hash_list, u32 vf)
49{
50 struct vf_data_storage *vfinfo = &adapter->vfinfo[vf];
51 int i;
52
53 /* only so many hash values supported */
54 entries = min(entries, IXGBE_MAX_VF_MC_ENTRIES);
55
56 /*
 57	 * salt away the number of multicast addresses assigned
 58	 * to this VF so they can be restored when the PF multicast
 59	 * list changes
60 */
61 vfinfo->num_vf_mc_hashes = entries;
62
63 /*
64 * VFs are limited to using the MTA hash table for their multicast
65 * addresses
66 */
67 for (i = 0; i < entries; i++) {
 68		vfinfo->vf_mc_hashes[i] = hash_list[i];
69 }
70
71 /* Flush and reset the mta with the new values */
72 ixgbe_set_rx_mode(adapter->netdev);
73
74 return 0;
75}
76
77void ixgbe_restore_vf_multicasts(struct ixgbe_adapter *adapter)
78{
79 struct ixgbe_hw *hw = &adapter->hw;
80 struct vf_data_storage *vfinfo;
81 int i, j;
82 u32 vector_bit;
83 u32 vector_reg;
84 u32 mta_reg;
85
86 for (i = 0; i < adapter->num_vfs; i++) {
87 vfinfo = &adapter->vfinfo[i];
88 for (j = 0; j < vfinfo->num_vf_mc_hashes; j++) {
89 hw->addr_ctrl.mta_in_use++;
90 vector_reg = (vfinfo->vf_mc_hashes[j] >> 5) & 0x7F;
91 vector_bit = vfinfo->vf_mc_hashes[j] & 0x1F;
92 mta_reg = IXGBE_READ_REG(hw, IXGBE_MTA(vector_reg));
93 mta_reg |= (1 << vector_bit);
94 IXGBE_WRITE_REG(hw, IXGBE_MTA(vector_reg), mta_reg);
95 }
96 }
97}
98
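To illustrate the mapping above: a stored hash value of 0x0A6B gives vector_reg = (0x0A6B >> 5) & 0x7F = 0x53 and vector_bit = 0x0A6B & 0x1F = 11, so restoring that entry sets bit 11 of MTA register 0x53.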
99int ixgbe_set_vf_vlan(struct ixgbe_adapter *adapter, int add, int vid, u32 vf)
100{
101 u32 ctrl;
102
103 /* Check if global VLAN already set, if not set it */
104 ctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_VLNCTRL);
105 if (!(ctrl & IXGBE_VLNCTRL_VFE)) {
106 /* enable VLAN tag insert/strip */
107 ctrl |= IXGBE_VLNCTRL_VFE;
108 ctrl &= ~IXGBE_VLNCTRL_CFIEN;
109 IXGBE_WRITE_REG(&adapter->hw, IXGBE_VLNCTRL, ctrl);
110 }
111
112 return adapter->hw.mac.ops.set_vfta(&adapter->hw, vid, vf, (bool)add);
113}
114
115
116void ixgbe_set_vmolr(struct ixgbe_hw *hw, u32 vf)
117{
118 u32 vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(vf));
119 vmolr |= (IXGBE_VMOLR_AUPE |
120 IXGBE_VMOLR_ROMPE |
121 IXGBE_VMOLR_ROPE |
122 IXGBE_VMOLR_BAM);
123 IXGBE_WRITE_REG(hw, IXGBE_VMOLR(vf), vmolr);
124}
125
126inline void ixgbe_vf_reset_event(struct ixgbe_adapter *adapter, u32 vf)
127{
128 struct ixgbe_hw *hw = &adapter->hw;
129
130 /* reset offloads to defaults */
131 ixgbe_set_vmolr(hw, vf);
132
133
134 /* reset multicast table array for vf */
135 adapter->vfinfo[vf].num_vf_mc_hashes = 0;
136
137 /* Flush and reset the mta with the new values */
138 ixgbe_set_rx_mode(adapter->netdev);
139
140 if (adapter->vfinfo[vf].rar > 0) {
141 adapter->hw.mac.ops.clear_rar(&adapter->hw,
142 adapter->vfinfo[vf].rar);
143 adapter->vfinfo[vf].rar = -1;
144 }
145}
146
147int ixgbe_set_vf_mac(struct ixgbe_adapter *adapter,
148 int vf, unsigned char *mac_addr)
149{
150 struct ixgbe_hw *hw = &adapter->hw;
151
152 adapter->vfinfo[vf].rar = hw->mac.ops.set_rar(hw, vf + 1, mac_addr,
153 vf, IXGBE_RAH_AV);
154 if (adapter->vfinfo[vf].rar < 0) {
155 DPRINTK(DRV, ERR, "Could not set MAC Filter for VF %d\n", vf);
156 return -1;
157 }
158
159 memcpy(adapter->vfinfo[vf].vf_mac_addresses, mac_addr, 6);
160
161 return 0;
162}
163
164int ixgbe_vf_configuration(struct pci_dev *pdev, unsigned int event_mask)
165{
166 unsigned char vf_mac_addr[6];
167 struct net_device *netdev = pci_get_drvdata(pdev);
168 struct ixgbe_adapter *adapter = netdev_priv(netdev);
169 unsigned int vfn = (event_mask & 0x3f);
170
171 bool enable = ((event_mask & 0x10000000U) != 0);
172
173 if (enable) {
174 random_ether_addr(vf_mac_addr);
175 DPRINTK(PROBE, INFO, "IOV: VF %d is enabled "
176 "mac %02X:%02X:%02X:%02X:%02X:%02X\n",
177 vfn,
178 vf_mac_addr[0], vf_mac_addr[1], vf_mac_addr[2],
179 vf_mac_addr[3], vf_mac_addr[4], vf_mac_addr[5]);
180 /*
 181		 * Store away the VF "permanent" MAC address; the VF will ask
182 * for it later.
183 */
184 memcpy(adapter->vfinfo[vfn].vf_mac_addresses, vf_mac_addr, 6);
185 }
186
187 return 0;
188}
189
190inline void ixgbe_vf_reset_msg(struct ixgbe_adapter *adapter, u32 vf)
191{
192 struct ixgbe_hw *hw = &adapter->hw;
193 u32 reg;
194 u32 reg_offset, vf_shift;
195
196 vf_shift = vf % 32;
197 reg_offset = vf / 32;
198
199 /* enable transmit and receive for vf */
200 reg = IXGBE_READ_REG(hw, IXGBE_VFTE(reg_offset));
 201	reg |= (1 << vf_shift);
202 IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset), reg);
203
204 reg = IXGBE_READ_REG(hw, IXGBE_VFRE(reg_offset));
 205	reg |= (1 << vf_shift);
206 IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset), reg);
207
208 ixgbe_vf_reset_event(adapter, vf);
209}
210
211static int ixgbe_rcv_msg_from_vf(struct ixgbe_adapter *adapter, u32 vf)
212{
213 u32 mbx_size = IXGBE_VFMAILBOX_SIZE;
214 u32 msgbuf[mbx_size];
215 struct ixgbe_hw *hw = &adapter->hw;
216 s32 retval;
217 int entries;
218 u16 *hash_list;
219 int add, vid;
220
221 retval = ixgbe_read_mbx(hw, msgbuf, mbx_size, vf);
222
223 if (retval)
224 printk(KERN_ERR "Error receiving message from VF\n");
225
226 /* this is a message we already processed, do nothing */
227 if (msgbuf[0] & (IXGBE_VT_MSGTYPE_ACK | IXGBE_VT_MSGTYPE_NACK))
228 return retval;
229
230 /*
231 * until the vf completes a virtual function reset it should not be
232 * allowed to start any configuration.
233 */
234
235 if (msgbuf[0] == IXGBE_VF_RESET) {
236 unsigned char *vf_mac = adapter->vfinfo[vf].vf_mac_addresses;
237 u8 *addr = (u8 *)(&msgbuf[1]);
238 DPRINTK(PROBE, INFO, "VF Reset msg received from vf %d\n", vf);
239 adapter->vfinfo[vf].clear_to_send = false;
240 ixgbe_vf_reset_msg(adapter, vf);
241 adapter->vfinfo[vf].clear_to_send = true;
242
243 /* reply to reset with ack and vf mac address */
244 msgbuf[0] = IXGBE_VF_RESET | IXGBE_VT_MSGTYPE_ACK;
245 memcpy(addr, vf_mac, IXGBE_ETH_LENGTH_OF_ADDRESS);
246 /*
247 * Piggyback the multicast filter type so VF can compute the
248 * correct vectors
249 */
250 msgbuf[3] = hw->mac.mc_filter_type;
251 ixgbe_write_mbx(hw, msgbuf, IXGBE_VF_PERMADDR_MSG_LEN, vf);
252
253 return retval;
254 }
255
256 if (!adapter->vfinfo[vf].clear_to_send) {
257 msgbuf[0] |= IXGBE_VT_MSGTYPE_NACK;
258 ixgbe_write_mbx(hw, msgbuf, 1, vf);
259 return retval;
260 }
261
262 switch ((msgbuf[0] & 0xFFFF)) {
263 case IXGBE_VF_SET_MAC_ADDR:
264 {
265 u8 *new_mac = ((u8 *)(&msgbuf[1]));
266 if (is_valid_ether_addr(new_mac))
267 ixgbe_set_vf_mac(adapter, vf, new_mac);
268 else
269 retval = -1;
270 }
271 break;
272 case IXGBE_VF_SET_MULTICAST:
273 entries = (msgbuf[0] & IXGBE_VT_MSGINFO_MASK)
274 >> IXGBE_VT_MSGINFO_SHIFT;
275 hash_list = (u16 *)&msgbuf[1];
276 retval = ixgbe_set_vf_multicasts(adapter, entries,
277 hash_list, vf);
278 break;
279 case IXGBE_VF_SET_LPE:
280 WARN_ON((msgbuf[0] & 0xFFFF) == IXGBE_VF_SET_LPE);
281 break;
282 case IXGBE_VF_SET_VLAN:
283 add = (msgbuf[0] & IXGBE_VT_MSGINFO_MASK)
284 >> IXGBE_VT_MSGINFO_SHIFT;
285 vid = (msgbuf[1] & IXGBE_VLVF_VLANID_MASK);
286 retval = ixgbe_set_vf_vlan(adapter, add, vid, vf);
287 break;
288 default:
289 DPRINTK(DRV, ERR, "Unhandled Msg %8.8x\n", msgbuf[0]);
290 retval = IXGBE_ERR_MBX;
291 break;
292 }
293
294 /* notify the VF of the results of what it sent us */
295 if (retval)
296 msgbuf[0] |= IXGBE_VT_MSGTYPE_NACK;
297 else
298 msgbuf[0] |= IXGBE_VT_MSGTYPE_ACK;
299
300 msgbuf[0] |= IXGBE_VT_MSGTYPE_CTS;
301
302 ixgbe_write_mbx(hw, msgbuf, 1, vf);
303
304 return retval;
305}
306
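Putting the reply encoding in numbers: for a successful IXGBE_VF_SET_MAC_ADDR (0x02) request with no extra info bits, the single word written back to the VF is 0x02 | IXGBE_VT_MSGTYPE_ACK | IXGBE_VT_MSGTYPE_CTS = 0xA0000002; on failure the ACK is replaced by IXGBE_VT_MSGTYPE_NACK, giving 0x60000002.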
307static void ixgbe_rcv_ack_from_vf(struct ixgbe_adapter *adapter, u32 vf)
308{
309 struct ixgbe_hw *hw = &adapter->hw;
310 u32 msg = IXGBE_VT_MSGTYPE_NACK;
311
312 /* if device isn't clear to send it shouldn't be reading either */
313 if (!adapter->vfinfo[vf].clear_to_send)
314 ixgbe_write_mbx(hw, &msg, 1, vf);
315}
316
317void ixgbe_msg_task(struct ixgbe_adapter *adapter)
318{
319 struct ixgbe_hw *hw = &adapter->hw;
320 u32 vf;
321
322 for (vf = 0; vf < adapter->num_vfs; vf++) {
323 /* process any reset requests */
324 if (!ixgbe_check_for_rst(hw, vf))
325 ixgbe_vf_reset_event(adapter, vf);
326
327 /* process any messages pending */
328 if (!ixgbe_check_for_msg(hw, vf))
329 ixgbe_rcv_msg_from_vf(adapter, vf);
330
331 /* process any acks */
332 if (!ixgbe_check_for_ack(hw, vf))
333 ixgbe_rcv_ack_from_vf(adapter, vf);
334 }
335}
336
337void ixgbe_disable_tx_rx(struct ixgbe_adapter *adapter)
338{
339 struct ixgbe_hw *hw = &adapter->hw;
340
341 /* disable transmit and receive for all vfs */
342 IXGBE_WRITE_REG(hw, IXGBE_VFTE(0), 0);
343 IXGBE_WRITE_REG(hw, IXGBE_VFTE(1), 0);
344
345 IXGBE_WRITE_REG(hw, IXGBE_VFRE(0), 0);
346 IXGBE_WRITE_REG(hw, IXGBE_VFRE(1), 0);
347}
348
349void ixgbe_ping_all_vfs(struct ixgbe_adapter *adapter)
350{
351 struct ixgbe_hw *hw = &adapter->hw;
352 u32 ping;
353 int i;
354
355 for (i = 0 ; i < adapter->num_vfs; i++) {
356 ping = IXGBE_PF_CONTROL_MSG;
357 if (adapter->vfinfo[i].clear_to_send)
358 ping |= IXGBE_VT_MSGTYPE_CTS;
359 ixgbe_write_mbx(hw, &ping, 1, i);
360 }
361}
362
diff --git a/drivers/net/ixgbe/ixgbe_sriov.h b/drivers/net/ixgbe/ixgbe_sriov.h
new file mode 100644
index 00000000000..51d1106c45a
--- /dev/null
+++ b/drivers/net/ixgbe/ixgbe_sriov.h
@@ -0,0 +1,47 @@
1/*******************************************************************************
2
3 Intel 10 Gigabit PCI Express Linux driver
4 Copyright(c) 1999 - 2009 Intel Corporation.
5
6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License,
8 version 2, as published by the Free Software Foundation.
9
10 This program is distributed in the hope it will be useful, but WITHOUT
11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 more details.
14
15 You should have received a copy of the GNU General Public License along with
16 this program; if not, write to the Free Software Foundation, Inc.,
17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18
19 The full GNU General Public License is included in this distribution in
20 the file called "COPYING".
21
22 Contact Information:
23 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
24 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
25
26*******************************************************************************/
27
28#ifndef _IXGBE_SRIOV_H_
29#define _IXGBE_SRIOV_H_
30
31int ixgbe_set_vf_multicasts(struct ixgbe_adapter *adapter,
32 int entries, u16 *hash_list, u32 vf);
33void ixgbe_restore_vf_multicasts(struct ixgbe_adapter *adapter);
34int ixgbe_set_vf_vlan(struct ixgbe_adapter *adapter, int add, int vid, u32 vf);
35void ixgbe_set_vmolr(struct ixgbe_hw *hw, u32 vf);
36void ixgbe_vf_reset_event(struct ixgbe_adapter *adapter, u32 vf);
37void ixgbe_vf_reset_msg(struct ixgbe_adapter *adapter, u32 vf);
38void ixgbe_msg_task(struct ixgbe_adapter *adapter);
39int ixgbe_set_vf_mac(struct ixgbe_adapter *adapter,
40 int vf, unsigned char *mac_addr);
41int ixgbe_vf_configuration(struct pci_dev *pdev, unsigned int event_mask);
42void ixgbe_disable_tx_rx(struct ixgbe_adapter *adapter);
43void ixgbe_ping_all_vfs(struct ixgbe_adapter *adapter);
44void ixgbe_dump_registers(struct ixgbe_adapter *adapter);
45
46#endif /* _IXGBE_SRIOV_H_ */
47
diff --git a/drivers/net/ixgbe/ixgbe_type.h b/drivers/net/ixgbe/ixgbe_type.h
index 9eafddfa1b9..0db67c19b2c 100644
--- a/drivers/net/ixgbe/ixgbe_type.h
+++ b/drivers/net/ixgbe/ixgbe_type.h
@@ -30,7 +30,7 @@
30 30
31#include <linux/types.h> 31#include <linux/types.h>
32#include <linux/mdio.h> 32#include <linux/mdio.h>
33#include <linux/list.h> 33#include <linux/netdevice.h>
34 34
35/* Vendor ID */ 35/* Vendor ID */
36#define IXGBE_INTEL_VENDOR_ID 0x8086 36#define IXGBE_INTEL_VENDOR_ID 0x8086
@@ -277,6 +277,7 @@
277#define IXGBE_DTXCTL 0x07E00 277#define IXGBE_DTXCTL 0x07E00
278 278
279#define IXGBE_DMATXCTL 0x04A80 279#define IXGBE_DMATXCTL 0x04A80
280#define IXGBE_PFDTXGSWC 0x08220
280#define IXGBE_DTXMXSZRQ 0x08100 281#define IXGBE_DTXMXSZRQ 0x08100
281#define IXGBE_DTXTCPFLGL 0x04A88 282#define IXGBE_DTXTCPFLGL 0x04A88
282#define IXGBE_DTXTCPFLGH 0x04A8C 283#define IXGBE_DTXTCPFLGH 0x04A8C
@@ -287,6 +288,8 @@
287#define IXGBE_DMATXCTL_NS 0x2 /* No Snoop LSO hdr buffer */ 288#define IXGBE_DMATXCTL_NS 0x2 /* No Snoop LSO hdr buffer */
288#define IXGBE_DMATXCTL_GDV 0x8 /* Global Double VLAN */ 289#define IXGBE_DMATXCTL_GDV 0x8 /* Global Double VLAN */
289#define IXGBE_DMATXCTL_VT_SHIFT 16 /* VLAN EtherType */ 290#define IXGBE_DMATXCTL_VT_SHIFT 16 /* VLAN EtherType */
291
292#define IXGBE_PFDTXGSWC_VT_LBEN 0x1 /* Local L2 VT switch enable */
290#define IXGBE_DCA_TXCTRL(_i) (0x07200 + ((_i) * 4)) /* 16 of these (0-15) */ 293#define IXGBE_DCA_TXCTRL(_i) (0x07200 + ((_i) * 4)) /* 16 of these (0-15) */
291/* Tx DCA Control register : 128 of these (0-127) */ 294/* Tx DCA Control register : 128 of these (0-127) */
292#define IXGBE_DCA_TXCTRL_82599(_i) (0x0600C + ((_i) * 0x40)) 295#define IXGBE_DCA_TXCTRL_82599(_i) (0x0600C + ((_i) * 0x40))
@@ -497,6 +500,7 @@
497/* DCB registers */ 500/* DCB registers */
498#define IXGBE_RTRPCS 0x02430 501#define IXGBE_RTRPCS 0x02430
499#define IXGBE_RTTDCS 0x04900 502#define IXGBE_RTTDCS 0x04900
503#define IXGBE_RTTDCS_ARBDIS 0x00000040 /* DCB arbiter disable */
500#define IXGBE_RTTPCS 0x0CD00 504#define IXGBE_RTTPCS 0x0CD00
501#define IXGBE_RTRUP2TC 0x03020 505#define IXGBE_RTRUP2TC 0x03020
502#define IXGBE_RTTUP2TC 0x0C800 506#define IXGBE_RTTUP2TC 0x0C800
@@ -730,6 +734,13 @@
730#define IXGBE_GCR_CMPL_TMOUT_RESEND 0x00010000 734#define IXGBE_GCR_CMPL_TMOUT_RESEND 0x00010000
731#define IXGBE_GCR_CAP_VER2 0x00040000 735#define IXGBE_GCR_CAP_VER2 0x00040000
732 736
737#define IXGBE_GCR_EXT_MSIX_EN 0x80000000
738#define IXGBE_GCR_EXT_VT_MODE_16 0x00000001
739#define IXGBE_GCR_EXT_VT_MODE_32 0x00000002
740#define IXGBE_GCR_EXT_VT_MODE_64 0x00000003
741#define IXGBE_GCR_EXT_SRIOV (IXGBE_GCR_EXT_MSIX_EN | \
742 IXGBE_GCR_EXT_VT_MODE_64)
743
733/* Time Sync Registers */ 744/* Time Sync Registers */
734#define IXGBE_TSYNCRXCTL 0x05188 /* Rx Time Sync Control register - RW */ 745#define IXGBE_TSYNCRXCTL 0x05188 /* Rx Time Sync Control register - RW */
735#define IXGBE_TSYNCTXCTL 0x08C00 /* Tx Time Sync Control register - RW */ 746#define IXGBE_TSYNCTXCTL 0x08C00 /* Tx Time Sync Control register - RW */
@@ -1065,6 +1076,8 @@
1065/* VFRE bitmask */ 1076/* VFRE bitmask */
1066#define IXGBE_VFRE_ENABLE_ALL 0xFFFFFFFF 1077#define IXGBE_VFRE_ENABLE_ALL 0xFFFFFFFF
1067 1078
1079#define IXGBE_VF_INIT_TIMEOUT 200 /* Number of retries to clear RSTI */
1080
1068/* RDHMPN and TDHMPN bitmasks */ 1081/* RDHMPN and TDHMPN bitmasks */
1069#define IXGBE_RDHMPN_RDICADDR 0x007FF800 1082#define IXGBE_RDHMPN_RDICADDR 0x007FF800
1070#define IXGBE_RDHMPN_RDICRDREQ 0x00800000 1083#define IXGBE_RDHMPN_RDICRDREQ 0x00800000
@@ -1295,6 +1308,7 @@
1295/* VLAN pool filtering masks */ 1308/* VLAN pool filtering masks */
1296#define IXGBE_VLVF_VIEN 0x80000000 /* filter is valid */ 1309#define IXGBE_VLVF_VIEN 0x80000000 /* filter is valid */
1297#define IXGBE_VLVF_ENTRIES 64 1310#define IXGBE_VLVF_ENTRIES 64
1311#define IXGBE_VLVF_VLANID_MASK 0x00000FFF
1298 1312
1299#define IXGBE_ETHERNET_IEEE_VLAN_TYPE 0x8100 /* 802.1q protocol */ 1313#define IXGBE_ETHERNET_IEEE_VLAN_TYPE 0x8100 /* 802.1q protocol */
1300 1314
@@ -1843,6 +1857,12 @@
1843#define IXGBE_RX_DESC_SPECIAL_PRI_SHIFT 0x000D /* Priority in upper 3 of 16 */ 1857#define IXGBE_RX_DESC_SPECIAL_PRI_SHIFT 0x000D /* Priority in upper 3 of 16 */
1844#define IXGBE_TX_DESC_SPECIAL_PRI_SHIFT IXGBE_RX_DESC_SPECIAL_PRI_SHIFT 1858#define IXGBE_TX_DESC_SPECIAL_PRI_SHIFT IXGBE_RX_DESC_SPECIAL_PRI_SHIFT
1845 1859
1860/* SR-IOV specific macros */
1861#define IXGBE_MBVFICR_INDEX(vf_number) (vf_number >> 4)
1862#define IXGBE_MBVFICR(_i) (0x00710 + (_i * 4))
1863#define IXGBE_VFLRE(_i) (((_i & 1) ? 0x001C0 : 0x00600))
1864#define IXGBE_VFLREC(_i) (0x00700 + (_i * 4))
1865
1846/* Little Endian defines */ 1866/* Little Endian defines */
1847#ifndef __le32 1867#ifndef __le32
1848#define __le32 u32 1868#define __le32 u32
@@ -2385,7 +2405,7 @@ struct ixgbe_mac_operations {
2385 s32 (*set_vmdq)(struct ixgbe_hw *, u32, u32); 2405 s32 (*set_vmdq)(struct ixgbe_hw *, u32, u32);
2386 s32 (*clear_vmdq)(struct ixgbe_hw *, u32, u32); 2406 s32 (*clear_vmdq)(struct ixgbe_hw *, u32, u32);
2387 s32 (*init_rx_addrs)(struct ixgbe_hw *); 2407 s32 (*init_rx_addrs)(struct ixgbe_hw *);
2388 s32 (*update_uc_addr_list)(struct ixgbe_hw *, struct list_head *); 2408 s32 (*update_uc_addr_list)(struct ixgbe_hw *, struct net_device *);
2389 s32 (*update_mc_addr_list)(struct ixgbe_hw *, u8 *, u32, 2409 s32 (*update_mc_addr_list)(struct ixgbe_hw *, u8 *, u32,
2390 ixgbe_mc_addr_itr); 2410 ixgbe_mc_addr_itr);
2391 s32 (*enable_mc)(struct ixgbe_hw *); 2411 s32 (*enable_mc)(struct ixgbe_hw *);
@@ -2463,6 +2483,37 @@ struct ixgbe_phy_info {
2463 bool multispeed_fiber; 2483 bool multispeed_fiber;
2464}; 2484};
2465 2485
2486#include "ixgbe_mbx.h"
2487
2488struct ixgbe_mbx_operations {
2489 s32 (*init_params)(struct ixgbe_hw *hw);
2490 s32 (*read)(struct ixgbe_hw *, u32 *, u16, u16);
2491 s32 (*write)(struct ixgbe_hw *, u32 *, u16, u16);
2492 s32 (*read_posted)(struct ixgbe_hw *, u32 *, u16, u16);
2493 s32 (*write_posted)(struct ixgbe_hw *, u32 *, u16, u16);
2494 s32 (*check_for_msg)(struct ixgbe_hw *, u16);
2495 s32 (*check_for_ack)(struct ixgbe_hw *, u16);
2496 s32 (*check_for_rst)(struct ixgbe_hw *, u16);
2497};
2498
2499struct ixgbe_mbx_stats {
2500 u32 msgs_tx;
2501 u32 msgs_rx;
2502
2503 u32 acks;
2504 u32 reqs;
2505 u32 rsts;
2506};
2507
2508struct ixgbe_mbx_info {
2509 struct ixgbe_mbx_operations ops;
2510 struct ixgbe_mbx_stats stats;
2511 u32 timeout;
2512 u32 usec_delay;
2513 u32 v2p_mailbox;
2514 u16 size;
2515};
2516
2466struct ixgbe_hw { 2517struct ixgbe_hw {
2467 u8 __iomem *hw_addr; 2518 u8 __iomem *hw_addr;
2468 void *back; 2519 void *back;
@@ -2472,6 +2523,7 @@ struct ixgbe_hw {
2472 struct ixgbe_phy_info phy; 2523 struct ixgbe_phy_info phy;
2473 struct ixgbe_eeprom_info eeprom; 2524 struct ixgbe_eeprom_info eeprom;
2474 struct ixgbe_bus_info bus; 2525 struct ixgbe_bus_info bus;
2526 struct ixgbe_mbx_info mbx;
2475 u16 device_id; 2527 u16 device_id;
2476 u16 vendor_id; 2528 u16 vendor_id;
2477 u16 subsystem_device_id; 2529 u16 subsystem_device_id;
@@ -2486,6 +2538,7 @@ struct ixgbe_info {
2486 struct ixgbe_mac_operations *mac_ops; 2538 struct ixgbe_mac_operations *mac_ops;
2487 struct ixgbe_eeprom_operations *eeprom_ops; 2539 struct ixgbe_eeprom_operations *eeprom_ops;
2488 struct ixgbe_phy_operations *phy_ops; 2540 struct ixgbe_phy_operations *phy_ops;
2541 struct ixgbe_mbx_operations *mbx_ops;
2489}; 2542};
2490 2543
2491 2544
diff --git a/drivers/net/ixgbevf/Makefile b/drivers/net/ixgbevf/Makefile
new file mode 100644
index 00000000000..dd4e0d27e8c
--- /dev/null
+++ b/drivers/net/ixgbevf/Makefile
@@ -0,0 +1,38 @@
1################################################################################
2#
3# Intel 82599 Virtual Function driver
4# Copyright(c) 1999 - 2009 Intel Corporation.
5#
6# This program is free software; you can redistribute it and/or modify it
7# under the terms and conditions of the GNU General Public License,
8# version 2, as published by the Free Software Foundation.
9#
10# This program is distributed in the hope it will be useful, but WITHOUT
11# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13# more details.
14#
15# You should have received a copy of the GNU General Public License along with
16# this program; if not, write to the Free Software Foundation, Inc.,
17# 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18#
19# The full GNU General Public License is included in this distribution in
20# the file called "COPYING".
21#
22# Contact Information:
23# e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
24# Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
25#
26################################################################################
27
28#
29# Makefile for the Intel(R) 82599 VF ethernet driver
30#
31
32obj-$(CONFIG_IXGBEVF) += ixgbevf.o
33
34ixgbevf-objs := vf.o \
35 mbx.o \
36 ethtool.o \
37 ixgbevf_main.o
38
diff --git a/drivers/net/ixgbevf/defines.h b/drivers/net/ixgbevf/defines.h
new file mode 100644
index 00000000000..c44fdb05447
--- /dev/null
+++ b/drivers/net/ixgbevf/defines.h
@@ -0,0 +1,292 @@
1/*******************************************************************************
2
3 Intel 82599 Virtual Function driver
4 Copyright(c) 1999 - 2009 Intel Corporation.
5
6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License,
8 version 2, as published by the Free Software Foundation.
9
10 This program is distributed in the hope it will be useful, but WITHOUT
11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 more details.
14
15 You should have received a copy of the GNU General Public License along with
16 this program; if not, write to the Free Software Foundation, Inc.,
17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18
19 The full GNU General Public License is included in this distribution in
20 the file called "COPYING".
21
22 Contact Information:
23 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
24 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
25
26*******************************************************************************/
27
28#ifndef _IXGBEVF_DEFINES_H_
29#define _IXGBEVF_DEFINES_H_
30
31/* Device IDs */
32#define IXGBE_DEV_ID_82599_VF 0x10ED
33
34#define IXGBE_VF_IRQ_CLEAR_MASK 7
35#define IXGBE_VF_MAX_TX_QUEUES 1
36#define IXGBE_VF_MAX_RX_QUEUES 1
37#define IXGBE_ETH_LENGTH_OF_ADDRESS 6
38
39/* Link speed */
40typedef u32 ixgbe_link_speed;
41#define IXGBE_LINK_SPEED_1GB_FULL 0x0020
42#define IXGBE_LINK_SPEED_10GB_FULL 0x0080
43
44#define IXGBE_CTRL_RST 0x04000000 /* Reset (SW) */
45#define IXGBE_RXDCTL_ENABLE 0x02000000 /* Enable specific Rx Queue */
46#define IXGBE_TXDCTL_ENABLE 0x02000000 /* Enable specific Tx Queue */
47#define IXGBE_LINKS_UP 0x40000000
48#define IXGBE_LINKS_SPEED 0x20000000
49
50/* Number of Transmit and Receive Descriptors must be a multiple of 8 */
51#define IXGBE_REQ_TX_DESCRIPTOR_MULTIPLE 8
52#define IXGBE_REQ_RX_DESCRIPTOR_MULTIPLE 8
53#define IXGBE_REQ_TX_BUFFER_GRANULARITY 1024
54
55/* Interrupt Vector Allocation Registers */
56#define IXGBE_IVAR_ALLOC_VAL 0x80 /* Interrupt Allocation valid */
57
58#define IXGBE_VF_INIT_TIMEOUT 200 /* Number of retries to clear RSTI */
59
60/* Receive Config masks */
61#define IXGBE_RXCTRL_RXEN 0x00000001 /* Enable Receiver */
62#define IXGBE_RXCTRL_DMBYPS 0x00000002 /* Descriptor Monitor Bypass */
63#define IXGBE_RXDCTL_ENABLE 0x02000000 /* Enable specific Rx Queue */
64#define IXGBE_RXDCTL_VME 0x40000000 /* VLAN mode enable */
65
66/* DCA Control */
67#define IXGBE_DCA_TXCTRL_TX_WB_RO_EN (1 << 11) /* Tx Desc writeback RO bit */
68
69/* PSRTYPE bit definitions */
70#define IXGBE_PSRTYPE_TCPHDR 0x00000010
71#define IXGBE_PSRTYPE_UDPHDR 0x00000020
72#define IXGBE_PSRTYPE_IPV4HDR 0x00000100
73#define IXGBE_PSRTYPE_IPV6HDR 0x00000200
74#define IXGBE_PSRTYPE_L2HDR 0x00001000
75
76/* SRRCTL bit definitions */
77#define IXGBE_SRRCTL_BSIZEPKT_SHIFT 10 /* so many KBs */
78#define IXGBE_SRRCTL_RDMTS_SHIFT 22
79#define IXGBE_SRRCTL_RDMTS_MASK 0x01C00000
80#define IXGBE_SRRCTL_DROP_EN 0x10000000
81#define IXGBE_SRRCTL_BSIZEPKT_MASK 0x0000007F
82#define IXGBE_SRRCTL_BSIZEHDR_MASK 0x00003F00
83#define IXGBE_SRRCTL_DESCTYPE_LEGACY 0x00000000
84#define IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF 0x02000000
85#define IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT 0x04000000
86#define IXGBE_SRRCTL_DESCTYPE_HDR_REPLICATION_LARGE_PKT 0x08000000
87#define IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS 0x0A000000
88#define IXGBE_SRRCTL_DESCTYPE_MASK 0x0E000000
89
90/* Receive Descriptor bit definitions */
91#define IXGBE_RXD_STAT_DD 0x01 /* Descriptor Done */
92#define IXGBE_RXD_STAT_EOP 0x02 /* End of Packet */
93#define IXGBE_RXD_STAT_FLM 0x04 /* FDir Match */
94#define IXGBE_RXD_STAT_VP 0x08 /* IEEE VLAN Packet */
95#define IXGBE_RXDADV_NEXTP_MASK 0x000FFFF0 /* Next Descriptor Index */
96#define IXGBE_RXDADV_NEXTP_SHIFT 0x00000004
97#define IXGBE_RXD_STAT_UDPCS 0x10 /* UDP xsum calculated */
98#define IXGBE_RXD_STAT_L4CS 0x20 /* L4 xsum calculated */
99#define IXGBE_RXD_STAT_IPCS 0x40 /* IP xsum calculated */
100#define IXGBE_RXD_STAT_PIF 0x80 /* passed in-exact filter */
101#define IXGBE_RXD_STAT_CRCV 0x100 /* Speculative CRC Valid */
102#define IXGBE_RXD_STAT_VEXT 0x200 /* 1st VLAN found */
103#define IXGBE_RXD_STAT_UDPV 0x400 /* Valid UDP checksum */
104#define IXGBE_RXD_STAT_DYNINT 0x800 /* Pkt caused INT via DYNINT */
105#define IXGBE_RXD_STAT_TS 0x10000 /* Time Stamp */
106#define IXGBE_RXD_STAT_SECP 0x20000 /* Security Processing */
107#define IXGBE_RXD_STAT_LB 0x40000 /* Loopback Status */
108#define IXGBE_RXD_STAT_ACK 0x8000 /* ACK Packet indication */
109#define IXGBE_RXD_ERR_CE 0x01 /* CRC Error */
110#define IXGBE_RXD_ERR_LE 0x02 /* Length Error */
111#define IXGBE_RXD_ERR_PE 0x08 /* Packet Error */
112#define IXGBE_RXD_ERR_OSE 0x10 /* Oversize Error */
113#define IXGBE_RXD_ERR_USE 0x20 /* Undersize Error */
114#define IXGBE_RXD_ERR_TCPE 0x40 /* TCP/UDP Checksum Error */
115#define IXGBE_RXD_ERR_IPE 0x80 /* IP Checksum Error */
116#define IXGBE_RXDADV_ERR_MASK 0xFFF00000 /* RDESC.ERRORS mask */
117#define IXGBE_RXDADV_ERR_SHIFT 20 /* RDESC.ERRORS shift */
118#define IXGBE_RXDADV_ERR_HBO 0x00800000 /* Header Buffer Overflow */
119#define IXGBE_RXDADV_ERR_CE 0x01000000 /* CRC Error */
120#define IXGBE_RXDADV_ERR_LE 0x02000000 /* Length Error */
121#define IXGBE_RXDADV_ERR_PE 0x08000000 /* Packet Error */
122#define IXGBE_RXDADV_ERR_OSE 0x10000000 /* Oversize Error */
123#define IXGBE_RXDADV_ERR_USE 0x20000000 /* Undersize Error */
124#define IXGBE_RXDADV_ERR_TCPE 0x40000000 /* TCP/UDP Checksum Error */
125#define IXGBE_RXDADV_ERR_IPE 0x80000000 /* IP Checksum Error */
126#define IXGBE_RXD_VLAN_ID_MASK 0x0FFF /* VLAN ID is in lower 12 bits */
127#define IXGBE_RXD_PRI_MASK 0xE000 /* Priority is in upper 3 bits */
128#define IXGBE_RXD_PRI_SHIFT 13
129#define IXGBE_RXD_CFI_MASK 0x1000 /* CFI is bit 12 */
130#define IXGBE_RXD_CFI_SHIFT 12
131
132#define IXGBE_RXDADV_STAT_DD IXGBE_RXD_STAT_DD /* Done */
133#define IXGBE_RXDADV_STAT_EOP IXGBE_RXD_STAT_EOP /* End of Packet */
134#define IXGBE_RXDADV_STAT_FLM IXGBE_RXD_STAT_FLM /* FDir Match */
135#define IXGBE_RXDADV_STAT_VP IXGBE_RXD_STAT_VP /* IEEE VLAN Pkt */
136#define IXGBE_RXDADV_STAT_MASK 0x000FFFFF /* Stat/NEXTP: bit 0-19 */
137#define IXGBE_RXDADV_STAT_FCEOFS 0x00000040 /* FCoE EOF/SOF Stat */
138#define IXGBE_RXDADV_STAT_FCSTAT 0x00000030 /* FCoE Pkt Stat */
139#define IXGBE_RXDADV_STAT_FCSTAT_NOMTCH 0x00000000 /* 00: No Ctxt Match */
140#define IXGBE_RXDADV_STAT_FCSTAT_NODDP 0x00000010 /* 01: Ctxt w/o DDP */
141#define IXGBE_RXDADV_STAT_FCSTAT_FCPRSP 0x00000020 /* 10: Recv. FCP_RSP */
142#define IXGBE_RXDADV_STAT_FCSTAT_DDP 0x00000030 /* 11: Ctxt w/ DDP */
143
144#define IXGBE_RXDADV_RSSTYPE_MASK 0x0000000F
145#define IXGBE_RXDADV_PKTTYPE_MASK 0x0000FFF0
146#define IXGBE_RXDADV_PKTTYPE_MASK_EX 0x0001FFF0
147#define IXGBE_RXDADV_HDRBUFLEN_MASK 0x00007FE0
148#define IXGBE_RXDADV_RSCCNT_MASK 0x001E0000
149#define IXGBE_RXDADV_RSCCNT_SHIFT 17
150#define IXGBE_RXDADV_HDRBUFLEN_SHIFT 5
151#define IXGBE_RXDADV_SPLITHEADER_EN 0x00001000
152#define IXGBE_RXDADV_SPH 0x8000
153
154#define IXGBE_RXD_ERR_FRAME_ERR_MASK ( \
155 IXGBE_RXD_ERR_CE | \
156 IXGBE_RXD_ERR_LE | \
157 IXGBE_RXD_ERR_PE | \
158 IXGBE_RXD_ERR_OSE | \
159 IXGBE_RXD_ERR_USE)
160
161#define IXGBE_RXDADV_ERR_FRAME_ERR_MASK ( \
162 IXGBE_RXDADV_ERR_CE | \
163 IXGBE_RXDADV_ERR_LE | \
164 IXGBE_RXDADV_ERR_PE | \
165 IXGBE_RXDADV_ERR_OSE | \
166 IXGBE_RXDADV_ERR_USE)
167
168#define IXGBE_TXD_POPTS_IXSM 0x01 /* Insert IP checksum */
169#define IXGBE_TXD_POPTS_TXSM 0x02 /* Insert TCP/UDP checksum */
170#define IXGBE_TXD_CMD_EOP 0x01000000 /* End of Packet */
171#define IXGBE_TXD_CMD_IFCS 0x02000000 /* Insert FCS (Ethernet CRC) */
172#define IXGBE_TXD_CMD_IC 0x04000000 /* Insert Checksum */
173#define IXGBE_TXD_CMD_RS 0x08000000 /* Report Status */
174#define IXGBE_TXD_CMD_DEXT 0x20000000 /* Descriptor extension (0 = legacy) */
175#define IXGBE_TXD_CMD_VLE 0x40000000 /* Add VLAN tag */
176#define IXGBE_TXD_STAT_DD 0x00000001 /* Descriptor Done */
177
178/* Transmit Descriptor - Advanced */
179union ixgbe_adv_tx_desc {
180 struct {
181 __le64 buffer_addr; /* Address of descriptor's data buf */
182 __le32 cmd_type_len;
183 __le32 olinfo_status;
184 } read;
185 struct {
186 __le64 rsvd; /* Reserved */
187 __le32 nxtseq_seed;
188 __le32 status;
189 } wb;
190};
191
192/* Receive Descriptor - Advanced */
193union ixgbe_adv_rx_desc {
194 struct {
195 __le64 pkt_addr; /* Packet buffer address */
196 __le64 hdr_addr; /* Header buffer address */
197 } read;
198 struct {
199 struct {
200 union {
201 __le32 data;
202 struct {
203 __le16 pkt_info; /* RSS, Pkt type */
204 __le16 hdr_info; /* Splithdr, hdrlen */
205 } hs_rss;
206 } lo_dword;
207 union {
208 __le32 rss; /* RSS Hash */
209 struct {
210 __le16 ip_id; /* IP id */
211 __le16 csum; /* Packet Checksum */
212 } csum_ip;
213 } hi_dword;
214 } lower;
215 struct {
216 __le32 status_error; /* ext status/error */
217 __le16 length; /* Packet length */
218 __le16 vlan; /* VLAN tag */
219 } upper;
220 } wb; /* writeback */
221};
222
223/* Context descriptors */
224struct ixgbe_adv_tx_context_desc {
225 __le32 vlan_macip_lens;
226 __le32 seqnum_seed;
227 __le32 type_tucmd_mlhl;
228 __le32 mss_l4len_idx;
229};
230
231/* Adv Transmit Descriptor Config Masks */
232#define IXGBE_ADVTXD_DTYP_MASK 0x00F00000 /* DTYP mask */
233#define IXGBE_ADVTXD_DTYP_CTXT 0x00200000 /* Advanced Context Desc */
234#define IXGBE_ADVTXD_DTYP_DATA 0x00300000 /* Advanced Data Descriptor */
235#define IXGBE_ADVTXD_DCMD_EOP IXGBE_TXD_CMD_EOP /* End of Packet */
236#define IXGBE_ADVTXD_DCMD_IFCS IXGBE_TXD_CMD_IFCS /* Insert FCS */
237#define IXGBE_ADVTXD_DCMD_RS IXGBE_TXD_CMD_RS /* Report Status */
238#define IXGBE_ADVTXD_DCMD_DEXT IXGBE_TXD_CMD_DEXT /* Desc ext (1=Adv) */
239#define IXGBE_ADVTXD_DCMD_VLE IXGBE_TXD_CMD_VLE /* VLAN pkt enable */
240#define IXGBE_ADVTXD_DCMD_TSE 0x80000000 /* TCP Seg enable */
241#define IXGBE_ADVTXD_STAT_DD IXGBE_TXD_STAT_DD /* Descriptor Done */
242#define IXGBE_ADVTXD_TUCMD_IPV4 0x00000400 /* IP Packet Type: 1=IPv4 */
243#define IXGBE_ADVTXD_TUCMD_IPV6 0x00000000 /* IP Packet Type: 0=IPv6 */
244#define IXGBE_ADVTXD_TUCMD_L4T_UDP 0x00000000 /* L4 Packet TYPE of UDP */
245#define IXGBE_ADVTXD_TUCMD_L4T_TCP 0x00000800 /* L4 Packet TYPE of TCP */
246#define IXGBE_ADVTXD_TUCMD_L4T_SCTP 0x00001000 /* L4 Packet TYPE of SCTP */
247#define IXGBE_ADVTXD_IDX_SHIFT 4 /* Adv desc Index shift */
248#define IXGBE_ADVTXD_POPTS_SHIFT 8 /* Adv desc POPTS shift */
249#define IXGBE_ADVTXD_POPTS_IXSM (IXGBE_TXD_POPTS_IXSM << \
250 IXGBE_ADVTXD_POPTS_SHIFT)
251#define IXGBE_ADVTXD_POPTS_TXSM (IXGBE_TXD_POPTS_TXSM << \
252 IXGBE_ADVTXD_POPTS_SHIFT)
253#define IXGBE_ADVTXD_PAYLEN_SHIFT 14 /* Adv desc PAYLEN shift */
254#define IXGBE_ADVTXD_MACLEN_SHIFT 9 /* Adv ctxt desc mac len shift */
255#define IXGBE_ADVTXD_VLAN_SHIFT 16 /* Adv ctxt vlan tag shift */
256#define IXGBE_ADVTXD_L4LEN_SHIFT 8 /* Adv ctxt L4LEN shift */
257#define IXGBE_ADVTXD_MSS_SHIFT 16 /* Adv ctxt MSS shift */
258
259/* Interrupt register bitmasks */
260
261/* Extended Interrupt Cause Read */
262#define IXGBE_EICR_RTX_QUEUE 0x0000FFFF /* RTx Queue Interrupt */
263#define IXGBE_EICR_MAILBOX 0x00080000 /* VF to PF Mailbox Interrupt */
264#define IXGBE_EICR_OTHER 0x80000000 /* Interrupt Cause Active */
265
266/* Extended Interrupt Cause Set */
267#define IXGBE_EICS_RTX_QUEUE IXGBE_EICR_RTX_QUEUE /* RTx Queue Interrupt */
268#define IXGBE_EICS_MAILBOX IXGBE_EICR_MAILBOX /* VF to PF Mailbox Int */
269#define IXGBE_EICS_OTHER IXGBE_EICR_OTHER /* INT Cause Active */
270
271/* Extended Interrupt Mask Set */
272#define IXGBE_EIMS_RTX_QUEUE IXGBE_EICR_RTX_QUEUE /* RTx Queue Interrupt */
273#define IXGBE_EIMS_MAILBOX IXGBE_EICR_MAILBOX /* VF to PF Mailbox Int */
274#define IXGBE_EIMS_OTHER IXGBE_EICR_OTHER /* INT Cause Active */
275
276/* Extended Interrupt Mask Clear */
277#define IXGBE_EIMC_RTX_QUEUE IXGBE_EICR_RTX_QUEUE /* RTx Queue Interrupt */
278#define IXGBE_EIMC_MAILBOX IXGBE_EICR_MAILBOX /* VF to PF Mailbox Int */
279#define IXGBE_EIMC_OTHER IXGBE_EICR_OTHER /* INT Cause Active */
280
281#define IXGBE_EIMS_ENABLE_MASK ( \
282 IXGBE_EIMS_RTX_QUEUE | \
283 IXGBE_EIMS_MAILBOX | \
284 IXGBE_EIMS_OTHER)
285
286#define IXGBE_EITR_CNT_WDIS 0x80000000
287
288/* Error Codes */
289#define IXGBE_ERR_INVALID_MAC_ADDR -1
290#define IXGBE_ERR_RESET_FAILED -2
291
292#endif /* _IXGBEVF_DEFINES_H_ */
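To show how the descriptor union and the IXGBE_ADVTXD_* masks above fit together, a minimal sketch of filling one single-buffer advanced data descriptor (illustrative only: tx_desc, dma_addr and len are assumed to come from the usual ring/DMA setup, and the real ixgbevf transmit path is in ixgbevf_main.c, not shown in this hunk):

    union ixgbe_adv_tx_desc *tx_desc = slot;   /* assumed descriptor slot in the TX ring */

    tx_desc->read.buffer_addr   = cpu_to_le64(dma_addr);
    tx_desc->read.cmd_type_len  = cpu_to_le32(IXGBE_ADVTXD_DTYP_DATA |
                                              IXGBE_ADVTXD_DCMD_DEXT |
                                              IXGBE_ADVTXD_DCMD_IFCS |
                                              IXGBE_ADVTXD_DCMD_EOP |
                                              IXGBE_ADVTXD_DCMD_RS | len);
    tx_desc->read.olinfo_status = cpu_to_le32(len << IXGBE_ADVTXD_PAYLEN_SHIFT);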
diff --git a/drivers/net/ixgbevf/ethtool.c b/drivers/net/ixgbevf/ethtool.c
new file mode 100644
index 00000000000..399be0c34c3
--- /dev/null
+++ b/drivers/net/ixgbevf/ethtool.c
@@ -0,0 +1,716 @@
1/*******************************************************************************
2
3 Intel 82599 Virtual Function driver
4 Copyright(c) 1999 - 2009 Intel Corporation.
5
6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License,
8 version 2, as published by the Free Software Foundation.
9
10 This program is distributed in the hope it will be useful, but WITHOUT
11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 more details.
14
15 You should have received a copy of the GNU General Public License along with
16 this program; if not, write to the Free Software Foundation, Inc.,
17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18
19 The full GNU General Public License is included in this distribution in
20 the file called "COPYING".
21
22 Contact Information:
23 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
24 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
25
26*******************************************************************************/
27
28/* ethtool support for ixgbevf */
29
30#include <linux/types.h>
31#include <linux/module.h>
32#include <linux/pci.h>
33#include <linux/netdevice.h>
34#include <linux/ethtool.h>
35#include <linux/vmalloc.h>
36#include <linux/if_vlan.h>
37#include <linux/uaccess.h>
38
39#include "ixgbevf.h"
40
41#define IXGBE_ALL_RAR_ENTRIES 16
42
43#ifdef ETHTOOL_GSTATS
44struct ixgbe_stats {
45 char stat_string[ETH_GSTRING_LEN];
46 int sizeof_stat;
47 int stat_offset;
48 int base_stat_offset;
49};
50
51#define IXGBEVF_STAT(m, b) sizeof(((struct ixgbevf_adapter *)0)->m), \
52 offsetof(struct ixgbevf_adapter, m), \
53 offsetof(struct ixgbevf_adapter, b)
54static struct ixgbe_stats ixgbe_gstrings_stats[] = {
55 {"rx_packets", IXGBEVF_STAT(stats.vfgprc, stats.base_vfgprc)},
56 {"tx_packets", IXGBEVF_STAT(stats.vfgptc, stats.base_vfgptc)},
57 {"rx_bytes", IXGBEVF_STAT(stats.vfgorc, stats.base_vfgorc)},
58 {"tx_bytes", IXGBEVF_STAT(stats.vfgotc, stats.base_vfgotc)},
59 {"tx_busy", IXGBEVF_STAT(tx_busy, zero_base)},
60 {"multicast", IXGBEVF_STAT(stats.vfmprc, stats.base_vfmprc)},
61 {"rx_csum_offload_good", IXGBEVF_STAT(hw_csum_rx_good, zero_base)},
62 {"rx_csum_offload_errors", IXGBEVF_STAT(hw_csum_rx_error, zero_base)},
63 {"tx_csum_offload_ctxt", IXGBEVF_STAT(hw_csum_tx_good, zero_base)},
64 {"rx_header_split", IXGBEVF_STAT(rx_hdr_split, zero_base)},
65};
66
67#define IXGBE_QUEUE_STATS_LEN 0
68#define IXGBE_GLOBAL_STATS_LEN ARRAY_SIZE(ixgbe_gstrings_stats)
69
70#define IXGBEVF_STATS_LEN (IXGBE_GLOBAL_STATS_LEN + IXGBE_QUEUE_STATS_LEN)
71#endif /* ETHTOOL_GSTATS */
72#ifdef ETHTOOL_TEST
73static const char ixgbe_gstrings_test[][ETH_GSTRING_LEN] = {
74 "Register test (offline)",
75 "Link test (on/offline)"
76};
77#define IXGBE_TEST_LEN (sizeof(ixgbe_gstrings_test) / ETH_GSTRING_LEN)
78#endif /* ETHTOOL_TEST */
79
80static int ixgbevf_get_settings(struct net_device *netdev,
81 struct ethtool_cmd *ecmd)
82{
83 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
84 struct ixgbe_hw *hw = &adapter->hw;
85 u32 link_speed = 0;
86 bool link_up;
87
88 ecmd->supported = SUPPORTED_10000baseT_Full;
89 ecmd->autoneg = AUTONEG_DISABLE;
90 ecmd->transceiver = XCVR_DUMMY1;
91 ecmd->port = -1;
92
93 hw->mac.ops.check_link(hw, &link_speed, &link_up, false);
94
95 if (link_up) {
96 ecmd->speed = (link_speed == IXGBE_LINK_SPEED_10GB_FULL) ?
97 SPEED_10000 : SPEED_1000;
98 ecmd->duplex = DUPLEX_FULL;
99 } else {
100 ecmd->speed = -1;
101 ecmd->duplex = -1;
102 }
103
104 return 0;
105}
106
107static u32 ixgbevf_get_rx_csum(struct net_device *netdev)
108{
109 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
110 return adapter->flags & IXGBE_FLAG_RX_CSUM_ENABLED;
111}
112
113static int ixgbevf_set_rx_csum(struct net_device *netdev, u32 data)
114{
115 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
116 if (data)
117 adapter->flags |= IXGBE_FLAG_RX_CSUM_ENABLED;
118 else
119 adapter->flags &= ~IXGBE_FLAG_RX_CSUM_ENABLED;
120
121 if (netif_running(netdev)) {
122 if (!adapter->dev_closed)
123 ixgbevf_reinit_locked(adapter);
124 } else {
125 ixgbevf_reset(adapter);
126 }
127
128 return 0;
129}
130
131static int ixgbevf_set_tso(struct net_device *netdev, u32 data)
132{
133 if (data) {
134 netdev->features |= NETIF_F_TSO;
135 netdev->features |= NETIF_F_TSO6;
136 } else {
137 netif_tx_stop_all_queues(netdev);
138 netdev->features &= ~NETIF_F_TSO;
139 netdev->features &= ~NETIF_F_TSO6;
140 netif_tx_start_all_queues(netdev);
141 }
142 return 0;
143}
144
145static u32 ixgbevf_get_msglevel(struct net_device *netdev)
146{
147 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
148 return adapter->msg_enable;
149}
150
151static void ixgbevf_set_msglevel(struct net_device *netdev, u32 data)
152{
153 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
154 adapter->msg_enable = data;
155}
156
157#define IXGBE_GET_STAT(_A_, _R_) (_A_->stats._R_)
158
159static char *ixgbevf_reg_names[] = {
160 "IXGBE_VFCTRL",
161 "IXGBE_VFSTATUS",
162 "IXGBE_VFLINKS",
163 "IXGBE_VFRXMEMWRAP",
164 "IXGBE_VFRTIMER",
165 "IXGBE_VTEICR",
166 "IXGBE_VTEICS",
167 "IXGBE_VTEIMS",
168 "IXGBE_VTEIMC",
169 "IXGBE_VTEIAC",
170 "IXGBE_VTEIAM",
171 "IXGBE_VTEITR",
172 "IXGBE_VTIVAR",
173 "IXGBE_VTIVAR_MISC",
174 "IXGBE_VFRDBAL0",
175 "IXGBE_VFRDBAL1",
176 "IXGBE_VFRDBAH0",
177 "IXGBE_VFRDBAH1",
178 "IXGBE_VFRDLEN0",
179 "IXGBE_VFRDLEN1",
180 "IXGBE_VFRDH0",
181 "IXGBE_VFRDH1",
182 "IXGBE_VFRDT0",
183 "IXGBE_VFRDT1",
184 "IXGBE_VFRXDCTL0",
185 "IXGBE_VFRXDCTL1",
186 "IXGBE_VFSRRCTL0",
187 "IXGBE_VFSRRCTL1",
188 "IXGBE_VFPSRTYPE",
189 "IXGBE_VFTDBAL0",
190 "IXGBE_VFTDBAL1",
191 "IXGBE_VFTDBAH0",
192 "IXGBE_VFTDBAH1",
193 "IXGBE_VFTDLEN0",
194 "IXGBE_VFTDLEN1",
195 "IXGBE_VFTDH0",
196 "IXGBE_VFTDH1",
197 "IXGBE_VFTDT0",
198 "IXGBE_VFTDT1",
199 "IXGBE_VFTXDCTL0",
200 "IXGBE_VFTXDCTL1",
201 "IXGBE_VFTDWBAL0",
202 "IXGBE_VFTDWBAL1",
203 "IXGBE_VFTDWBAH0",
204 "IXGBE_VFTDWBAH1"
205};
206
207
208static int ixgbevf_get_regs_len(struct net_device *netdev)
209{
210 return (ARRAY_SIZE(ixgbevf_reg_names)) * sizeof(u32);
211}
212
213static void ixgbevf_get_regs(struct net_device *netdev,
214 struct ethtool_regs *regs,
215 void *p)
216{
217 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
218 struct ixgbe_hw *hw = &adapter->hw;
219 u32 *regs_buff = p;
220 u32 regs_len = ixgbevf_get_regs_len(netdev);
221 u8 i;
222
223 memset(p, 0, regs_len);
224
225 regs->version = (1 << 24) | hw->revision_id << 16 | hw->device_id;
226
227 /* General Registers */
228 regs_buff[0] = IXGBE_READ_REG(hw, IXGBE_VFCTRL);
229 regs_buff[1] = IXGBE_READ_REG(hw, IXGBE_VFSTATUS);
230 regs_buff[2] = IXGBE_READ_REG(hw, IXGBE_VFLINKS);
231 regs_buff[3] = IXGBE_READ_REG(hw, IXGBE_VFRXMEMWRAP);
232 regs_buff[4] = IXGBE_READ_REG(hw, IXGBE_VFRTIMER);
233
234 /* Interrupt */
235	/* Don't read EICR because reading it can clear interrupt causes;
236	 * instead read EICS, which is a shadow of EICR but doesn't clear it */
237 regs_buff[5] = IXGBE_READ_REG(hw, IXGBE_VTEICS);
238 regs_buff[6] = IXGBE_READ_REG(hw, IXGBE_VTEICS);
239 regs_buff[7] = IXGBE_READ_REG(hw, IXGBE_VTEIMS);
240 regs_buff[8] = IXGBE_READ_REG(hw, IXGBE_VTEIMC);
241 regs_buff[9] = IXGBE_READ_REG(hw, IXGBE_VTEIAC);
242 regs_buff[10] = IXGBE_READ_REG(hw, IXGBE_VTEIAM);
243 regs_buff[11] = IXGBE_READ_REG(hw, IXGBE_VTEITR(0));
244 regs_buff[12] = IXGBE_READ_REG(hw, IXGBE_VTIVAR(0));
245 regs_buff[13] = IXGBE_READ_REG(hw, IXGBE_VTIVAR_MISC);
246
247 /* Receive DMA */
248 for (i = 0; i < 2; i++)
249 regs_buff[14 + i] = IXGBE_READ_REG(hw, IXGBE_VFRDBAL(i));
250 for (i = 0; i < 2; i++)
251 regs_buff[16 + i] = IXGBE_READ_REG(hw, IXGBE_VFRDBAH(i));
252 for (i = 0; i < 2; i++)
253 regs_buff[18 + i] = IXGBE_READ_REG(hw, IXGBE_VFRDLEN(i));
254 for (i = 0; i < 2; i++)
255 regs_buff[20 + i] = IXGBE_READ_REG(hw, IXGBE_VFRDH(i));
256 for (i = 0; i < 2; i++)
257 regs_buff[22 + i] = IXGBE_READ_REG(hw, IXGBE_VFRDT(i));
258 for (i = 0; i < 2; i++)
259 regs_buff[24 + i] = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i));
260 for (i = 0; i < 2; i++)
261 regs_buff[26 + i] = IXGBE_READ_REG(hw, IXGBE_VFSRRCTL(i));
262
263 /* Receive */
264 regs_buff[28] = IXGBE_READ_REG(hw, IXGBE_VFPSRTYPE);
265
266 /* Transmit */
267 for (i = 0; i < 2; i++)
268 regs_buff[29 + i] = IXGBE_READ_REG(hw, IXGBE_VFTDBAL(i));
269 for (i = 0; i < 2; i++)
270 regs_buff[31 + i] = IXGBE_READ_REG(hw, IXGBE_VFTDBAH(i));
271 for (i = 0; i < 2; i++)
272 regs_buff[33 + i] = IXGBE_READ_REG(hw, IXGBE_VFTDLEN(i));
273 for (i = 0; i < 2; i++)
274 regs_buff[35 + i] = IXGBE_READ_REG(hw, IXGBE_VFTDH(i));
275 for (i = 0; i < 2; i++)
276 regs_buff[37 + i] = IXGBE_READ_REG(hw, IXGBE_VFTDT(i));
277 for (i = 0; i < 2; i++)
278 regs_buff[39 + i] = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i));
279 for (i = 0; i < 2; i++)
280 regs_buff[41 + i] = IXGBE_READ_REG(hw, IXGBE_VFTDWBAL(i));
281 for (i = 0; i < 2; i++)
282 regs_buff[43 + i] = IXGBE_READ_REG(hw, IXGBE_VFTDWBAH(i));
283
284 for (i = 0; i < ARRAY_SIZE(ixgbevf_reg_names); i++)
285 hw_dbg(hw, "%s\t%8.8x\n", ixgbevf_reg_names[i], regs_buff[i]);
286}
287
288static void ixgbevf_get_drvinfo(struct net_device *netdev,
289 struct ethtool_drvinfo *drvinfo)
290{
291 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
292
293 strlcpy(drvinfo->driver, ixgbevf_driver_name, 32);
294 strlcpy(drvinfo->version, ixgbevf_driver_version, 32);
295
296 strlcpy(drvinfo->fw_version, "N/A", 4);
297 strlcpy(drvinfo->bus_info, pci_name(adapter->pdev), 32);
298}
299
300static void ixgbevf_get_ringparam(struct net_device *netdev,
301 struct ethtool_ringparam *ring)
302{
303 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
304 struct ixgbevf_ring *tx_ring = adapter->tx_ring;
305 struct ixgbevf_ring *rx_ring = adapter->rx_ring;
306
307 ring->rx_max_pending = IXGBEVF_MAX_RXD;
308 ring->tx_max_pending = IXGBEVF_MAX_TXD;
309 ring->rx_mini_max_pending = 0;
310 ring->rx_jumbo_max_pending = 0;
311 ring->rx_pending = rx_ring->count;
312 ring->tx_pending = tx_ring->count;
313 ring->rx_mini_pending = 0;
314 ring->rx_jumbo_pending = 0;
315}
316
317static int ixgbevf_set_ringparam(struct net_device *netdev,
318 struct ethtool_ringparam *ring)
319{
320 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
321 struct ixgbevf_ring *tx_ring = NULL, *rx_ring = NULL;
322 int i, err;
323 u32 new_rx_count, new_tx_count;
324 bool need_tx_update = false;
325 bool need_rx_update = false;
326
327 if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
328 return -EINVAL;
329
330 new_rx_count = max(ring->rx_pending, (u32)IXGBEVF_MIN_RXD);
331 new_rx_count = min(new_rx_count, (u32)IXGBEVF_MAX_RXD);
332 new_rx_count = ALIGN(new_rx_count, IXGBE_REQ_RX_DESCRIPTOR_MULTIPLE);
333
334 new_tx_count = max(ring->tx_pending, (u32)IXGBEVF_MIN_TXD);
335 new_tx_count = min(new_tx_count, (u32)IXGBEVF_MAX_TXD);
336 new_tx_count = ALIGN(new_tx_count, IXGBE_REQ_TX_DESCRIPTOR_MULTIPLE);
337
338 if ((new_tx_count == adapter->tx_ring->count) &&
339 (new_rx_count == adapter->rx_ring->count)) {
340 /* nothing to do */
341 return 0;
342 }
343
344 while (test_and_set_bit(__IXGBEVF_RESETTING, &adapter->state))
345 msleep(1);
346
347 if (new_tx_count != adapter->tx_ring_count) {
348 tx_ring = kcalloc(adapter->num_tx_queues,
349 sizeof(struct ixgbevf_ring), GFP_KERNEL);
350 if (!tx_ring) {
351 err = -ENOMEM;
352 goto err_setup;
353 }
354 memcpy(tx_ring, adapter->tx_ring,
355 adapter->num_tx_queues * sizeof(struct ixgbevf_ring));
356 for (i = 0; i < adapter->num_tx_queues; i++) {
357 tx_ring[i].count = new_tx_count;
358 err = ixgbevf_setup_tx_resources(adapter,
359 &tx_ring[i]);
360 if (err) {
361 while (i) {
362 i--;
363 ixgbevf_free_tx_resources(adapter,
364 &tx_ring[i]);
365 }
366 kfree(tx_ring);
367 goto err_setup;
368 }
369 tx_ring[i].v_idx = adapter->tx_ring[i].v_idx;
370 }
371 need_tx_update = true;
372 }
373
374 if (new_rx_count != adapter->rx_ring_count) {
375 rx_ring = kcalloc(adapter->num_rx_queues,
376 sizeof(struct ixgbevf_ring), GFP_KERNEL);
377		if (!rx_ring) {
378 err = -ENOMEM;
379 goto err_rx_setup;
380 }
381 memcpy(rx_ring, adapter->rx_ring,
382 adapter->num_rx_queues * sizeof(struct ixgbevf_ring));
383 for (i = 0; i < adapter->num_rx_queues; i++) {
384 rx_ring[i].count = new_rx_count;
385 err = ixgbevf_setup_rx_resources(adapter,
386 &rx_ring[i]);
387 if (err) {
388 while (i) {
389 i--;
390 ixgbevf_free_rx_resources(adapter,
391 &rx_ring[i]);
392 }
393 kfree(rx_ring);
394 goto err_rx_setup;
395 }
396 rx_ring[i].v_idx = adapter->rx_ring[i].v_idx;
397 }
398 need_rx_update = true;
399 }
400
401err_rx_setup:
402 /* if rings need to be updated, here's the place to do it in one shot */
403 if (need_tx_update || need_rx_update) {
404 if (netif_running(netdev))
405 ixgbevf_down(adapter);
406 }
407
408 /* tx */
409 if (need_tx_update) {
410 kfree(adapter->tx_ring);
411 adapter->tx_ring = tx_ring;
412 tx_ring = NULL;
413 adapter->tx_ring_count = new_tx_count;
414 }
415
416 /* rx */
417 if (need_rx_update) {
418 kfree(adapter->rx_ring);
419 adapter->rx_ring = rx_ring;
420 rx_ring = NULL;
421 adapter->rx_ring_count = new_rx_count;
422 }
423
424 /* success! */
425 err = 0;
426 if (netif_running(netdev))
427 ixgbevf_up(adapter);
428
429err_setup:
430 clear_bit(__IXGBEVF_RESETTING, &adapter->state);
431 return err;
432}
433
434static int ixgbevf_get_sset_count(struct net_device *dev, int stringset)
435{
436 switch (stringset) {
437 case ETH_SS_TEST:
438 return IXGBE_TEST_LEN;
439 case ETH_SS_STATS:
440 return IXGBE_GLOBAL_STATS_LEN;
441 default:
442 return -EINVAL;
443 }
444}
445
446static void ixgbevf_get_ethtool_stats(struct net_device *netdev,
447 struct ethtool_stats *stats, u64 *data)
448{
449 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
450 int i;
451
452 ixgbevf_update_stats(adapter);
453 for (i = 0; i < IXGBE_GLOBAL_STATS_LEN; i++) {
454 char *p = (char *)adapter +
455 ixgbe_gstrings_stats[i].stat_offset;
456 char *b = (char *)adapter +
457 ixgbe_gstrings_stats[i].base_stat_offset;
458 data[i] = ((ixgbe_gstrings_stats[i].sizeof_stat ==
459 sizeof(u64)) ? *(u64 *)p : *(u32 *)p) -
460 ((ixgbe_gstrings_stats[i].sizeof_stat ==
461 sizeof(u64)) ? *(u64 *)b : *(u32 *)b);
462 }
463}
464
465static void ixgbevf_get_strings(struct net_device *netdev, u32 stringset,
466 u8 *data)
467{
468 char *p = (char *)data;
469 int i;
470
471 switch (stringset) {
472 case ETH_SS_TEST:
473 memcpy(data, *ixgbe_gstrings_test,
474 IXGBE_TEST_LEN * ETH_GSTRING_LEN);
475 break;
476 case ETH_SS_STATS:
477 for (i = 0; i < IXGBE_GLOBAL_STATS_LEN; i++) {
478 memcpy(p, ixgbe_gstrings_stats[i].stat_string,
479 ETH_GSTRING_LEN);
480 p += ETH_GSTRING_LEN;
481 }
482 break;
483 }
484}
485
486static int ixgbevf_link_test(struct ixgbevf_adapter *adapter, u64 *data)
487{
488 struct ixgbe_hw *hw = &adapter->hw;
489 bool link_up;
490 u32 link_speed = 0;
491 *data = 0;
492
493 hw->mac.ops.check_link(hw, &link_speed, &link_up, true);
494 if (!link_up)
495 *data = 1;
496
497 return *data;
498}
499
500/* ethtool register test data */
501struct ixgbevf_reg_test {
502 u16 reg;
503 u8 array_len;
504 u8 test_type;
505 u32 mask;
506 u32 write;
507};
508
509/* In the hardware, registers are laid out either singly, in arrays
510 * spaced 0x40 bytes apart, or in contiguous tables. We assume
511 * most tests take place on arrays or single registers (handled
512 * as a single-element array) and special-case the tables.
513 * Table tests are always pattern tests.
514 *
515 * We also make provision for some required setup steps by specifying
516 * registers to be written without any read-back testing.
517 */
518
519#define PATTERN_TEST 1
520#define SET_READ_TEST 2
521#define WRITE_NO_TEST 3
522#define TABLE32_TEST 4
523#define TABLE64_TEST_LO 5
524#define TABLE64_TEST_HI 6
525
526/* default VF register test */
527static struct ixgbevf_reg_test reg_test_vf[] = {
528 { IXGBE_VFRDBAL(0), 2, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFF80 },
529 { IXGBE_VFRDBAH(0), 2, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
530 { IXGBE_VFRDLEN(0), 2, PATTERN_TEST, 0x000FFF80, 0x000FFFFF },
531 { IXGBE_VFRXDCTL(0), 2, WRITE_NO_TEST, 0, IXGBE_RXDCTL_ENABLE },
532 { IXGBE_VFRDT(0), 2, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
533 { IXGBE_VFRXDCTL(0), 2, WRITE_NO_TEST, 0, 0 },
534 { IXGBE_VFTDBAL(0), 2, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
535 { IXGBE_VFTDBAH(0), 2, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
536 { IXGBE_VFTDLEN(0), 2, PATTERN_TEST, 0x000FFF80, 0x000FFF80 },
537 { 0, 0, 0, 0 }
538};
539
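/*
 * Illustrative expansion (example only, not part of the patch): for the
 * first reg_test_vf entry above,
 *   { IXGBE_VFRDBAL(0), 2, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFF80 },
 * ixgbevf_reg_test() below loops array_len (2) times with a 0x40 stride,
 * matching the per-ring register spacing described earlier:
 *
 *   REG_PATTERN_TEST(IXGBE_VFRDBAL(0) + 0 * 0x40, 0xFFFFFF80, 0xFFFFFF80);
 *   REG_PATTERN_TEST(IXGBE_VFRDBAL(0) + 1 * 0x40, 0xFFFFFF80, 0xFFFFFF80);
 *
 * Each invocation writes the four canned patterns masked by 'write' and
 * compares the read-back value against (pattern & write & mask), so bits
 * outside 'mask' are ignored.  WRITE_NO_TEST entries such as the
 * IXGBE_VFRXDCTL(0) lines are setup writes with no read-back check.
 */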
540#define REG_PATTERN_TEST(R, M, W) \
541{ \
542 u32 pat, val, before; \
543 const u32 _test[] = {0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF}; \
544 for (pat = 0; pat < ARRAY_SIZE(_test); pat++) { \
545 before = readl(adapter->hw.hw_addr + R); \
546 writel((_test[pat] & W), (adapter->hw.hw_addr + R)); \
547 val = readl(adapter->hw.hw_addr + R); \
548 if (val != (_test[pat] & W & M)) { \
549 hw_dbg(&adapter->hw, \
550 "pattern test reg %04X failed: got " \
551 "0x%08X expected 0x%08X\n", \
552 R, val, (_test[pat] & W & M)); \
553 *data = R; \
554 writel(before, adapter->hw.hw_addr + R); \
555 return 1; \
556 } \
557 writel(before, adapter->hw.hw_addr + R); \
558 } \
559}
560
561#define REG_SET_AND_CHECK(R, M, W) \
562{ \
563 u32 val, before; \
564 before = readl(adapter->hw.hw_addr + R); \
565 writel((W & M), (adapter->hw.hw_addr + R)); \
566 val = readl(adapter->hw.hw_addr + R); \
567 if ((W & M) != (val & M)) { \
568 printk(KERN_ERR "set/check reg %04X test failed: got 0x%08X " \
569 "expected 0x%08X\n", R, (val & M), (W & M)); \
570 *data = R; \
571 writel(before, (adapter->hw.hw_addr + R)); \
572 return 1; \
573 } \
574 writel(before, (adapter->hw.hw_addr + R)); \
575}
576
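/*
 * Note: both macros above rely on an 'adapter' pointer and a 'data'
 * argument being in scope at the expansion site (ixgbevf_reg_test), and
 * on a failed check they record the register offset in *data and return 1
 * from the calling function rather than from the macro itself.  The
 * original register value is restored before returning in either case.
 */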
577static int ixgbevf_reg_test(struct ixgbevf_adapter *adapter, u64 *data)
578{
579 struct ixgbevf_reg_test *test;
580 u32 i;
581
582 test = reg_test_vf;
583
584 /*
585 * Perform the register test, looping through the test table
586 * until we either fail or reach the null entry.
587 */
588 while (test->reg) {
589 for (i = 0; i < test->array_len; i++) {
590 switch (test->test_type) {
591 case PATTERN_TEST:
592 REG_PATTERN_TEST(test->reg + (i * 0x40),
593 test->mask,
594 test->write);
595 break;
596 case SET_READ_TEST:
597 REG_SET_AND_CHECK(test->reg + (i * 0x40),
598 test->mask,
599 test->write);
600 break;
601 case WRITE_NO_TEST:
602 writel(test->write,
603 (adapter->hw.hw_addr + test->reg)
604 + (i * 0x40));
605 break;
606 case TABLE32_TEST:
607 REG_PATTERN_TEST(test->reg + (i * 4),
608 test->mask,
609 test->write);
610 break;
611 case TABLE64_TEST_LO:
612 REG_PATTERN_TEST(test->reg + (i * 8),
613 test->mask,
614 test->write);
615 break;
616 case TABLE64_TEST_HI:
617 REG_PATTERN_TEST((test->reg + 4) + (i * 8),
618 test->mask,
619 test->write);
620 break;
621 }
622 }
623 test++;
624 }
625
626 *data = 0;
627 return *data;
628}
629
630static void ixgbevf_diag_test(struct net_device *netdev,
631 struct ethtool_test *eth_test, u64 *data)
632{
633 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
634 bool if_running = netif_running(netdev);
635
636 set_bit(__IXGBEVF_TESTING, &adapter->state);
637 if (eth_test->flags == ETH_TEST_FL_OFFLINE) {
638 /* Offline tests */
639
640 hw_dbg(&adapter->hw, "offline testing starting\n");
641
642 /* Link test performed before hardware reset so autoneg doesn't
643 * interfere with test result */
644 if (ixgbevf_link_test(adapter, &data[1]))
645 eth_test->flags |= ETH_TEST_FL_FAILED;
646
647 if (if_running)
648 /* indicate we're in test mode */
649 dev_close(netdev);
650 else
651 ixgbevf_reset(adapter);
652
653 hw_dbg(&adapter->hw, "register testing starting\n");
654 if (ixgbevf_reg_test(adapter, &data[0]))
655 eth_test->flags |= ETH_TEST_FL_FAILED;
656
657 ixgbevf_reset(adapter);
658
659 clear_bit(__IXGBEVF_TESTING, &adapter->state);
660 if (if_running)
661 dev_open(netdev);
662 } else {
663 hw_dbg(&adapter->hw, "online testing starting\n");
664 /* Online tests */
665 if (ixgbevf_link_test(adapter, &data[1]))
666 eth_test->flags |= ETH_TEST_FL_FAILED;
667
668 /* Online tests aren't run; pass by default */
669 data[0] = 0;
670
671 clear_bit(__IXGBEVF_TESTING, &adapter->state);
672 }
673 msleep_interruptible(4 * 1000);
674}
675
676static int ixgbevf_nway_reset(struct net_device *netdev)
677{
678 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
679
680 if (netif_running(netdev)) {
681 if (!adapter->dev_closed)
682 ixgbevf_reinit_locked(adapter);
683 }
684
685 return 0;
686}
687
688static struct ethtool_ops ixgbevf_ethtool_ops = {
689 .get_settings = ixgbevf_get_settings,
690 .get_drvinfo = ixgbevf_get_drvinfo,
691 .get_regs_len = ixgbevf_get_regs_len,
692 .get_regs = ixgbevf_get_regs,
693 .nway_reset = ixgbevf_nway_reset,
694 .get_link = ethtool_op_get_link,
695 .get_ringparam = ixgbevf_get_ringparam,
696 .set_ringparam = ixgbevf_set_ringparam,
697 .get_rx_csum = ixgbevf_get_rx_csum,
698 .set_rx_csum = ixgbevf_set_rx_csum,
699 .get_tx_csum = ethtool_op_get_tx_csum,
700 .set_tx_csum = ethtool_op_set_tx_ipv6_csum,
701 .get_sg = ethtool_op_get_sg,
702 .set_sg = ethtool_op_set_sg,
703 .get_msglevel = ixgbevf_get_msglevel,
704 .set_msglevel = ixgbevf_set_msglevel,
705 .get_tso = ethtool_op_get_tso,
706 .set_tso = ixgbevf_set_tso,
707 .self_test = ixgbevf_diag_test,
708 .get_sset_count = ixgbevf_get_sset_count,
709 .get_strings = ixgbevf_get_strings,
710 .get_ethtool_stats = ixgbevf_get_ethtool_stats,
711};
712
713void ixgbevf_set_ethtool_ops(struct net_device *netdev)
714{
715 SET_ETHTOOL_OPS(netdev, &ixgbevf_ethtool_ops);
716}
diff --git a/drivers/net/ixgbevf/ixgbevf.h b/drivers/net/ixgbevf/ixgbevf.h
new file mode 100644
index 00000000000..f7015efbff0
--- /dev/null
+++ b/drivers/net/ixgbevf/ixgbevf.h
@@ -0,0 +1,318 @@
1/*******************************************************************************
2
3 Intel 82599 Virtual Function driver
4 Copyright(c) 1999 - 2009 Intel Corporation.
5
6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License,
8 version 2, as published by the Free Software Foundation.
9
10 This program is distributed in the hope it will be useful, but WITHOUT
11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 more details.
14
15 You should have received a copy of the GNU General Public License along with
16 this program; if not, write to the Free Software Foundation, Inc.,
17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18
19 The full GNU General Public License is included in this distribution in
20 the file called "COPYING".
21
22 Contact Information:
23 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
24 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
25
26*******************************************************************************/
27
28#ifndef _IXGBEVF_H_
29#define _IXGBEVF_H_
30
31#include <linux/types.h>
32#include <linux/timer.h>
33#include <linux/io.h>
34#include <linux/netdevice.h>
35
36#include "vf.h"
37
38/* wrapper around a pointer to a socket buffer,
39 * so a DMA handle can be stored along with the buffer */
40struct ixgbevf_tx_buffer {
41 struct sk_buff *skb;
42 dma_addr_t dma;
43 unsigned long time_stamp;
44 u16 length;
45 u16 next_to_watch;
46 u16 mapped_as_page;
47};
48
49struct ixgbevf_rx_buffer {
50 struct sk_buff *skb;
51 dma_addr_t dma;
52 struct page *page;
53 dma_addr_t page_dma;
54 unsigned int page_offset;
55};
56
57struct ixgbevf_ring {
58 struct ixgbevf_adapter *adapter; /* backlink */
59 void *desc; /* descriptor ring memory */
60 dma_addr_t dma; /* phys. address of descriptor ring */
61 unsigned int size; /* length in bytes */
62	unsigned int count;		/* number of descriptors */
63 unsigned int next_to_use;
64 unsigned int next_to_clean;
65
66 int queue_index; /* needed for multiqueue queue management */
67 union {
68 struct ixgbevf_tx_buffer *tx_buffer_info;
69 struct ixgbevf_rx_buffer *rx_buffer_info;
70 };
71
72 u16 head;
73 u16 tail;
74
75 unsigned int total_bytes;
76 unsigned int total_packets;
77
78 u16 reg_idx; /* holds the special value that gets the hardware register
79 * offset associated with this ring, which is different
80 * for DCB and RSS modes */
81
82#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE)
83 /* cpu for tx queue */
84 int cpu;
85#endif
86
87 u64 v_idx; /* maps directly to the index for this ring in the hardware
88 * vector array, can also be used for finding the bit in EICR
89 * and friends that represents the vector for this ring */
90
91 u16 work_limit; /* max work per interrupt */
92 u16 rx_buf_len;
93};
94
95enum ixgbevf_ring_f_enum {
96 RING_F_NONE = 0,
97 RING_F_ARRAY_SIZE /* must be last in enum set */
98};
99
100struct ixgbevf_ring_feature {
101 int indices;
102 int mask;
103};
104
105/* How many Rx Buffers do we bundle into one write to the hardware? */
106#define IXGBEVF_RX_BUFFER_WRITE 16 /* Must be power of 2 */
107
108#define MAX_RX_QUEUES 1
109#define MAX_TX_QUEUES 1
110
111#define IXGBEVF_DEFAULT_TXD 1024
112#define IXGBEVF_DEFAULT_RXD 512
113#define IXGBEVF_MAX_TXD 4096
114#define IXGBEVF_MIN_TXD 64
115#define IXGBEVF_MAX_RXD 4096
116#define IXGBEVF_MIN_RXD 64
117
118/* Supported Rx Buffer Sizes */
119#define IXGBEVF_RXBUFFER_64 64 /* Used for packet split */
120#define IXGBEVF_RXBUFFER_128 128 /* Used for packet split */
121#define IXGBEVF_RXBUFFER_256 256 /* Used for packet split */
122#define IXGBEVF_RXBUFFER_2048 2048
123#define IXGBEVF_MAX_RXBUFFER 16384 /* largest size for single descriptor */
124
125#define IXGBEVF_RX_HDR_SIZE IXGBEVF_RXBUFFER_256
126
127#define MAXIMUM_ETHERNET_VLAN_SIZE (VLAN_ETH_FRAME_LEN + ETH_FCS_LEN)
128
129#define IXGBE_TX_FLAGS_CSUM (u32)(1)
130#define IXGBE_TX_FLAGS_VLAN (u32)(1 << 1)
131#define IXGBE_TX_FLAGS_TSO (u32)(1 << 2)
132#define IXGBE_TX_FLAGS_IPV4 (u32)(1 << 3)
133#define IXGBE_TX_FLAGS_FCOE (u32)(1 << 4)
134#define IXGBE_TX_FLAGS_FSO (u32)(1 << 5)
135#define IXGBE_TX_FLAGS_VLAN_MASK 0xffff0000
136#define IXGBE_TX_FLAGS_VLAN_PRIO_MASK 0x0000e000
137#define IXGBE_TX_FLAGS_VLAN_SHIFT 16
138
139/* MAX_MSIX_Q_VECTORS of these are allocated,
140 * but we only use one per queue-specific vector.
141 */
142struct ixgbevf_q_vector {
143 struct ixgbevf_adapter *adapter;
144 struct napi_struct napi;
145 DECLARE_BITMAP(rxr_idx, MAX_RX_QUEUES); /* Rx ring indices */
146 DECLARE_BITMAP(txr_idx, MAX_TX_QUEUES); /* Tx ring indices */
147 u8 rxr_count; /* Rx ring count assigned to this vector */
148 u8 txr_count; /* Tx ring count assigned to this vector */
149 u8 tx_itr;
150 u8 rx_itr;
151 u32 eitr;
152 int v_idx; /* vector index in list */
153};
154
155/* Helper macros to switch between ints/sec and what the register uses.
156 * And yes, it's the same math going both ways. The lowest value
157 * supported by all of the ixgbe hardware is 8.
158 */
159#define EITR_INTS_PER_SEC_TO_REG(_eitr) \
160 ((_eitr) ? (1000000000 / ((_eitr) * 256)) : 8)
161#define EITR_REG_TO_INTS_PER_SEC EITR_INTS_PER_SEC_TO_REG
162
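/*
 * Worked example (illustrative values): the 8000 ints/s bulk-latency
 * setting used in ixgbevf_set_itr_msix() maps to
 *   EITR_INTS_PER_SEC_TO_REG(8000) = 1000000000 / (8000 * 256) = 488,
 * and converting 488 back gives 1000000000 / (488 * 256) ~= 8004 ints/s,
 * which is why one macro serves both directions.  A zero rate falls back
 * to the minimum interval value of 8.
 */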
163#define IXGBE_DESC_UNUSED(R) \
164 ((((R)->next_to_clean > (R)->next_to_use) ? 0 : (R)->count) + \
165 (R)->next_to_clean - (R)->next_to_use - 1)
166
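/*
 * Worked example (illustrative values): with count = 512,
 * next_to_clean = 10 and next_to_use = 500 the macro yields
 * 512 + 10 - 500 - 1 = 21 unused descriptors; with next_to_clean = 500
 * and next_to_use = 10 it yields 500 - 10 - 1 = 489.  The trailing -1
 * keeps one slot in reserve so a completely full ring is never confused
 * with an empty one.
 */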
167#define IXGBE_RX_DESC_ADV(R, i) \
168 (&(((union ixgbe_adv_rx_desc *)((R).desc))[i]))
169#define IXGBE_TX_DESC_ADV(R, i) \
170 (&(((union ixgbe_adv_tx_desc *)((R).desc))[i]))
171#define IXGBE_TX_CTXTDESC_ADV(R, i) \
172 (&(((struct ixgbe_adv_tx_context_desc *)((R).desc))[i]))
173
174#define IXGBE_MAX_JUMBO_FRAME_SIZE 16128
175
176#define OTHER_VECTOR 1
177#define NON_Q_VECTORS (OTHER_VECTOR)
178
179#define MAX_MSIX_Q_VECTORS 2
180#define MAX_MSIX_COUNT 2
181
182#define MIN_MSIX_Q_VECTORS 2
183#define MIN_MSIX_COUNT (MIN_MSIX_Q_VECTORS + NON_Q_VECTORS)
184
185/* board specific private data structure */
186struct ixgbevf_adapter {
187 struct timer_list watchdog_timer;
188#ifdef NETIF_F_HW_VLAN_TX
189 struct vlan_group *vlgrp;
190#endif
191 u16 bd_number;
192 struct work_struct reset_task;
193 struct ixgbevf_q_vector *q_vector[MAX_MSIX_Q_VECTORS];
194 char name[MAX_MSIX_COUNT][IFNAMSIZ + 9];
195
196 /* Interrupt Throttle Rate */
197 u32 itr_setting;
198 u16 eitr_low;
199 u16 eitr_high;
200
201 /* TX */
202 struct ixgbevf_ring *tx_ring; /* One per active queue */
203 int num_tx_queues;
204 u64 restart_queue;
205 u64 hw_csum_tx_good;
206 u64 lsc_int;
207 u64 hw_tso_ctxt;
208 u64 hw_tso6_ctxt;
209 u32 tx_timeout_count;
210 bool detect_tx_hung;
211
212 /* RX */
213 struct ixgbevf_ring *rx_ring; /* One per active queue */
214 int num_rx_queues;
215 int num_rx_pools; /* == num_rx_queues in 82598 */
216 int num_rx_queues_per_pool; /* 1 if 82598, can be many if 82599 */
217 u64 hw_csum_rx_error;
218 u64 hw_rx_no_dma_resources;
219 u64 hw_csum_rx_good;
220 u64 non_eop_descs;
221 int num_msix_vectors;
222 int max_msix_q_vectors; /* true count of q_vectors for device */
223 struct ixgbevf_ring_feature ring_feature[RING_F_ARRAY_SIZE];
224 struct msix_entry *msix_entries;
225
226 u64 rx_hdr_split;
227 u32 alloc_rx_page_failed;
228 u32 alloc_rx_buff_failed;
229
230 /* Some features need tri-state capability,
231 * thus the additional *_CAPABLE flags.
232 */
233 u32 flags;
234#define IXGBE_FLAG_RX_CSUM_ENABLED (u32)(1)
235#define IXGBE_FLAG_RX_1BUF_CAPABLE (u32)(1 << 1)
236#define IXGBE_FLAG_RX_PS_CAPABLE (u32)(1 << 2)
237#define IXGBE_FLAG_RX_PS_ENABLED (u32)(1 << 3)
238#define IXGBE_FLAG_IN_NETPOLL (u32)(1 << 4)
239#define IXGBE_FLAG_IMIR_ENABLED (u32)(1 << 5)
240#define IXGBE_FLAG_MQ_CAPABLE (u32)(1 << 6)
241#define IXGBE_FLAG_NEED_LINK_UPDATE (u32)(1 << 7)
242#define IXGBE_FLAG_IN_WATCHDOG_TASK (u32)(1 << 8)
243 /* OS defined structs */
244 struct net_device *netdev;
245 struct pci_dev *pdev;
246 struct net_device_stats net_stats;
247
248 /* structs defined in ixgbe_vf.h */
249 struct ixgbe_hw hw;
250 u16 msg_enable;
251 struct ixgbevf_hw_stats stats;
252 u64 zero_base;
253 /* Interrupt Throttle Rate */
254 u32 eitr_param;
255
256 unsigned long state;
257 u32 *config_space;
258 u64 tx_busy;
259 unsigned int tx_ring_count;
260 unsigned int rx_ring_count;
261
262 u32 link_speed;
263 bool link_up;
264 unsigned long link_check_timeout;
265
266 struct work_struct watchdog_task;
267 bool netdev_registered;
268 bool dev_closed;
269};
270
271enum ixgbevf_state_t {
272 __IXGBEVF_TESTING,
273 __IXGBEVF_RESETTING,
274 __IXGBEVF_DOWN
275};
276
277enum ixgbevf_boards {
278 board_82599_vf,
279};
280
281extern struct ixgbevf_info ixgbevf_vf_info;
282extern struct ixgbe_mac_operations ixgbevf_mbx_ops;
283
284/* needed by ethtool.c */
285extern char ixgbevf_driver_name[];
286extern const char ixgbevf_driver_version[];
287
288extern int ixgbevf_up(struct ixgbevf_adapter *adapter);
289extern void ixgbevf_down(struct ixgbevf_adapter *adapter);
290extern void ixgbevf_reinit_locked(struct ixgbevf_adapter *adapter);
291extern void ixgbevf_reset(struct ixgbevf_adapter *adapter);
292extern void ixgbevf_set_ethtool_ops(struct net_device *netdev);
293extern int ixgbevf_setup_rx_resources(struct ixgbevf_adapter *,
294 struct ixgbevf_ring *);
295extern int ixgbevf_setup_tx_resources(struct ixgbevf_adapter *,
296 struct ixgbevf_ring *);
297extern void ixgbevf_free_rx_resources(struct ixgbevf_adapter *,
298 struct ixgbevf_ring *);
299extern void ixgbevf_free_tx_resources(struct ixgbevf_adapter *,
300 struct ixgbevf_ring *);
301extern void ixgbevf_update_stats(struct ixgbevf_adapter *adapter);
302
303#ifdef ETHTOOL_OPS_COMPAT
304extern int ethtool_ioctl(struct ifreq *ifr);
305
306#endif
307extern void ixgbe_napi_add_all(struct ixgbevf_adapter *adapter);
308extern void ixgbe_napi_del_all(struct ixgbevf_adapter *adapter);
309
310#ifdef DEBUG
311extern char *ixgbevf_get_hw_dev_name(struct ixgbe_hw *hw);
312#define hw_dbg(hw, format, arg...) \
313 printk(KERN_DEBUG "%s: " format, ixgbevf_get_hw_dev_name(hw), ##arg)
314#else
315#define hw_dbg(hw, format, arg...) do {} while (0)
316#endif
317
318#endif /* _IXGBEVF_H_ */
diff --git a/drivers/net/ixgbevf/ixgbevf_main.c b/drivers/net/ixgbevf/ixgbevf_main.c
new file mode 100644
index 00000000000..b9f10d05049
--- /dev/null
+++ b/drivers/net/ixgbevf/ixgbevf_main.c
@@ -0,0 +1,3578 @@
1/*******************************************************************************
2
3 Intel 82599 Virtual Function driver
4 Copyright(c) 1999 - 2009 Intel Corporation.
5
6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License,
8 version 2, as published by the Free Software Foundation.
9
10 This program is distributed in the hope it will be useful, but WITHOUT
11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 more details.
14
15 You should have received a copy of the GNU General Public License along with
16 this program; if not, write to the Free Software Foundation, Inc.,
17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18
19 The full GNU General Public License is included in this distribution in
20 the file called "COPYING".
21
22 Contact Information:
23 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
24 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
25
26*******************************************************************************/
27
28
29/******************************************************************************
30 Copyright (c)2006 - 2007 Myricom, Inc. for some LRO specific code
31******************************************************************************/
32#include <linux/types.h>
33#include <linux/module.h>
34#include <linux/pci.h>
35#include <linux/netdevice.h>
36#include <linux/vmalloc.h>
37#include <linux/string.h>
38#include <linux/in.h>
39#include <linux/ip.h>
40#include <linux/tcp.h>
41#include <linux/ipv6.h>
42#include <net/checksum.h>
43#include <net/ip6_checksum.h>
44#include <linux/ethtool.h>
45#include <linux/if_vlan.h>
46
47#include "ixgbevf.h"
48
49char ixgbevf_driver_name[] = "ixgbevf";
50static const char ixgbevf_driver_string[] =
51 "Intel(R) 82599 Virtual Function";
52
53#define DRV_VERSION "1.0.0-k0"
54const char ixgbevf_driver_version[] = DRV_VERSION;
55static char ixgbevf_copyright[] = "Copyright (c) 2009 Intel Corporation.";
56
57static const struct ixgbevf_info *ixgbevf_info_tbl[] = {
58 [board_82599_vf] = &ixgbevf_vf_info,
59};
60
61/* ixgbevf_pci_tbl - PCI Device ID Table
62 *
63 * Wildcard entries (PCI_ANY_ID) should come last
64 * Last entry must be all 0s
65 *
66 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
67 * Class, Class Mask, private data (not used) }
68 */
69static struct pci_device_id ixgbevf_pci_tbl[] = {
70 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_VF),
71 board_82599_vf},
72
73 /* required last entry */
74 {0, }
75};
76MODULE_DEVICE_TABLE(pci, ixgbevf_pci_tbl);
77
78MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
79MODULE_DESCRIPTION("Intel(R) 82599 Virtual Function Driver");
80MODULE_LICENSE("GPL");
81MODULE_VERSION(DRV_VERSION);
82
83#define DEFAULT_DEBUG_LEVEL_SHIFT 3
84
85/* forward decls */
86static void ixgbevf_set_itr_msix(struct ixgbevf_q_vector *q_vector);
87static void ixgbevf_write_eitr(struct ixgbevf_adapter *adapter, int v_idx,
88 u32 itr_reg);
89
90static inline void ixgbevf_release_rx_desc(struct ixgbe_hw *hw,
91 struct ixgbevf_ring *rx_ring,
92 u32 val)
93{
94 /*
95 * Force memory writes to complete before letting h/w
96 * know there are new descriptors to fetch. (Only
97 * applicable for weak-ordered memory model archs,
98 * such as IA-64).
99 */
100 wmb();
101 IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rx_ring->reg_idx), val);
102}
103
104/*
105 * ixgbevf_set_ivar - set the IVAR registers, mapping interrupt causes to vectors
106 * @adapter: pointer to adapter struct
107 * @direction: 0 for Rx, 1 for Tx, -1 for other causes
108 * @queue: queue to map the corresponding interrupt to
109 * @msix_vector: the vector to map to the corresponding queue
110 *
111 */
112static void ixgbevf_set_ivar(struct ixgbevf_adapter *adapter, s8 direction,
113 u8 queue, u8 msix_vector)
114{
115 u32 ivar, index;
116 struct ixgbe_hw *hw = &adapter->hw;
117 if (direction == -1) {
118 /* other causes */
119 msix_vector |= IXGBE_IVAR_ALLOC_VAL;
120 ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR_MISC);
121 ivar &= ~0xFF;
122 ivar |= msix_vector;
123 IXGBE_WRITE_REG(hw, IXGBE_VTIVAR_MISC, ivar);
124 } else {
125 /* tx or rx causes */
126 msix_vector |= IXGBE_IVAR_ALLOC_VAL;
127 index = ((16 * (queue & 1)) + (8 * direction));
128 ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR(queue >> 1));
129 ivar &= ~(0xFF << index);
130 ivar |= (msix_vector << index);
131 IXGBE_WRITE_REG(hw, IXGBE_VTIVAR(queue >> 1), ivar);
132 }
133}
134
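/*
 * Illustrative mapping for the IVAR layout computed above: each VTIVAR
 * register holds four 8-bit entries, two queues per register.  An Rx
 * cause (direction 0) on queue 0 gives index = 16 * 0 + 8 * 0 = 0, so the
 * vector (with IXGBE_IVAR_ALLOC_VAL set) lands in bits 7:0 of VTIVAR(0);
 * a Tx cause (direction 1) on queue 1 gives index = 16 * 1 + 8 * 1 = 24,
 * i.e. bits 31:24 of the same register.  Direction -1 instead programs
 * the low byte of VTIVAR_MISC for the mailbox/other cause.
 */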
135static void ixgbevf_unmap_and_free_tx_resource(struct ixgbevf_adapter *adapter,
136 struct ixgbevf_tx_buffer
137 *tx_buffer_info)
138{
139 if (tx_buffer_info->dma) {
140 if (tx_buffer_info->mapped_as_page)
141 pci_unmap_page(adapter->pdev,
142 tx_buffer_info->dma,
143 tx_buffer_info->length,
144 PCI_DMA_TODEVICE);
145 else
146 pci_unmap_single(adapter->pdev,
147 tx_buffer_info->dma,
148 tx_buffer_info->length,
149 PCI_DMA_TODEVICE);
150 tx_buffer_info->dma = 0;
151 }
152 if (tx_buffer_info->skb) {
153 dev_kfree_skb_any(tx_buffer_info->skb);
154 tx_buffer_info->skb = NULL;
155 }
156 tx_buffer_info->time_stamp = 0;
157 /* tx_buffer_info must be completely set up in the transmit path */
158}
159
160static inline bool ixgbevf_check_tx_hang(struct ixgbevf_adapter *adapter,
161 struct ixgbevf_ring *tx_ring,
162 unsigned int eop)
163{
164 struct ixgbe_hw *hw = &adapter->hw;
165 u32 head, tail;
166
167	/* Detect a transmit hang in hardware; this serializes the
168	 * check with the clearing of time_stamp and movement of eop */
169 head = readl(hw->hw_addr + tx_ring->head);
170 tail = readl(hw->hw_addr + tx_ring->tail);
171 adapter->detect_tx_hung = false;
172 if ((head != tail) &&
173 tx_ring->tx_buffer_info[eop].time_stamp &&
174 time_after(jiffies, tx_ring->tx_buffer_info[eop].time_stamp + HZ)) {
175 /* detected Tx unit hang */
176 union ixgbe_adv_tx_desc *tx_desc;
177 tx_desc = IXGBE_TX_DESC_ADV(*tx_ring, eop);
178 printk(KERN_ERR "Detected Tx Unit Hang\n"
179 " Tx Queue <%d>\n"
180 " TDH, TDT <%x>, <%x>\n"
181 " next_to_use <%x>\n"
182 " next_to_clean <%x>\n"
183 "tx_buffer_info[next_to_clean]\n"
184 " time_stamp <%lx>\n"
185 " jiffies <%lx>\n",
186 tx_ring->queue_index,
187 head, tail,
188 tx_ring->next_to_use, eop,
189 tx_ring->tx_buffer_info[eop].time_stamp, jiffies);
190 return true;
191 }
192
193 return false;
194}
195
196#define IXGBE_MAX_TXD_PWR 14
197#define IXGBE_MAX_DATA_PER_TXD (1 << IXGBE_MAX_TXD_PWR)
198
199/* Tx Descriptors needed, worst case */
200#define TXD_USE_COUNT(S) (((S) >> IXGBE_MAX_TXD_PWR) + \
201 (((S) & (IXGBE_MAX_DATA_PER_TXD - 1)) ? 1 : 0))
202#ifdef MAX_SKB_FRAGS
203#define DESC_NEEDED (TXD_USE_COUNT(IXGBE_MAX_DATA_PER_TXD) /* skb->data */ + \
204 MAX_SKB_FRAGS * TXD_USE_COUNT(PAGE_SIZE) + 1) /* for context */
205#else
206#define DESC_NEEDED TXD_USE_COUNT(IXGBE_MAX_DATA_PER_TXD)
207#endif
208
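/*
 * Worked example (illustrative sizes): TXD_USE_COUNT(9000) = 1 (no full
 * 16 KB chunk plus one remainder) while TXD_USE_COUNT(20000) = 2 (one
 * full chunk plus a remainder).  DESC_NEEDED is then the worst case for
 * a single skb: one descriptor for skb->data, up to
 * MAX_SKB_FRAGS * TXD_USE_COUNT(PAGE_SIZE) for the fragments, plus one
 * context descriptor.
 */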
209static void ixgbevf_tx_timeout(struct net_device *netdev);
210
211/**
212 * ixgbevf_clean_tx_irq - Reclaim resources after transmit completes
213 * @adapter: board private structure
214 * @tx_ring: tx ring to clean
215 **/
216static bool ixgbevf_clean_tx_irq(struct ixgbevf_adapter *adapter,
217 struct ixgbevf_ring *tx_ring)
218{
219 struct net_device *netdev = adapter->netdev;
220 struct ixgbe_hw *hw = &adapter->hw;
221 union ixgbe_adv_tx_desc *tx_desc, *eop_desc;
222 struct ixgbevf_tx_buffer *tx_buffer_info;
223 unsigned int i, eop, count = 0;
224 unsigned int total_bytes = 0, total_packets = 0;
225
226 i = tx_ring->next_to_clean;
227 eop = tx_ring->tx_buffer_info[i].next_to_watch;
228 eop_desc = IXGBE_TX_DESC_ADV(*tx_ring, eop);
229
230 while ((eop_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)) &&
231 (count < tx_ring->work_limit)) {
232 bool cleaned = false;
233 for ( ; !cleaned; count++) {
234 struct sk_buff *skb;
235 tx_desc = IXGBE_TX_DESC_ADV(*tx_ring, i);
236 tx_buffer_info = &tx_ring->tx_buffer_info[i];
237 cleaned = (i == eop);
238 skb = tx_buffer_info->skb;
239
240 if (cleaned && skb) {
241 unsigned int segs, bytecount;
242
243 /* gso_segs is currently only valid for tcp */
244 segs = skb_shinfo(skb)->gso_segs ?: 1;
245 /* multiply data chunks by size of headers */
246 bytecount = ((segs - 1) * skb_headlen(skb)) +
247 skb->len;
248 total_packets += segs;
249 total_bytes += bytecount;
250 }
251
252 ixgbevf_unmap_and_free_tx_resource(adapter,
253 tx_buffer_info);
254
255 tx_desc->wb.status = 0;
256
257 i++;
258 if (i == tx_ring->count)
259 i = 0;
260 }
261
262 eop = tx_ring->tx_buffer_info[i].next_to_watch;
263 eop_desc = IXGBE_TX_DESC_ADV(*tx_ring, eop);
264 }
265
266 tx_ring->next_to_clean = i;
267
268#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
269 if (unlikely(count && netif_carrier_ok(netdev) &&
270 (IXGBE_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) {
271 /* Make sure that anybody stopping the queue after this
272 * sees the new next_to_clean.
273 */
274 smp_mb();
275#ifdef HAVE_TX_MQ
276 if (__netif_subqueue_stopped(netdev, tx_ring->queue_index) &&
277 !test_bit(__IXGBEVF_DOWN, &adapter->state)) {
278 netif_wake_subqueue(netdev, tx_ring->queue_index);
279 ++adapter->restart_queue;
280 }
281#else
282 if (netif_queue_stopped(netdev) &&
283 !test_bit(__IXGBEVF_DOWN, &adapter->state)) {
284 netif_wake_queue(netdev);
285 ++adapter->restart_queue;
286 }
287#endif
288 }
289
290 if (adapter->detect_tx_hung) {
291 if (ixgbevf_check_tx_hang(adapter, tx_ring, i)) {
292 /* schedule immediate reset if we believe we hung */
293 printk(KERN_INFO
294 "tx hang %d detected, resetting adapter\n",
295 adapter->tx_timeout_count + 1);
296 ixgbevf_tx_timeout(adapter->netdev);
297 }
298 }
299
300 /* re-arm the interrupt */
301 if ((count >= tx_ring->work_limit) &&
302 (!test_bit(__IXGBEVF_DOWN, &adapter->state))) {
303 IXGBE_WRITE_REG(hw, IXGBE_VTEICS, tx_ring->v_idx);
304 }
305
306 tx_ring->total_bytes += total_bytes;
307 tx_ring->total_packets += total_packets;
308
309 adapter->net_stats.tx_bytes += total_bytes;
310 adapter->net_stats.tx_packets += total_packets;
311
312 return (count < tx_ring->work_limit);
313}
314
315/**
316 * ixgbevf_receive_skb - Send a completed packet up the stack
317 * @q_vector: structure containing interrupt and ring information
318 * @skb: packet to send up
319 * @status: hardware indication of status of receive
320 * @ring: rx descriptor ring (for a specific queue) to setup
321 * @rx_desc: rx descriptor
322 **/
323static void ixgbevf_receive_skb(struct ixgbevf_q_vector *q_vector,
324 struct sk_buff *skb, u8 status,
325 struct ixgbevf_ring *ring,
326 union ixgbe_adv_rx_desc *rx_desc)
327{
328 struct ixgbevf_adapter *adapter = q_vector->adapter;
329 bool is_vlan = (status & IXGBE_RXD_STAT_VP);
330 u16 tag = le16_to_cpu(rx_desc->wb.upper.vlan);
331 int ret;
332
333 if (!(adapter->flags & IXGBE_FLAG_IN_NETPOLL)) {
334 if (adapter->vlgrp && is_vlan)
335 vlan_gro_receive(&q_vector->napi,
336 adapter->vlgrp,
337 tag, skb);
338 else
339 napi_gro_receive(&q_vector->napi, skb);
340 } else {
341 if (adapter->vlgrp && is_vlan)
342 ret = vlan_hwaccel_rx(skb, adapter->vlgrp, tag);
343 else
344 ret = netif_rx(skb);
345 }
346}
347
348/**
349 * ixgbevf_rx_checksum - indicate in skb if hw indicated a good cksum
350 * @adapter: address of board private structure
351 * @status_err: hardware indication of status of receive
352 * @skb: skb currently being received and modified
353 **/
354static inline void ixgbevf_rx_checksum(struct ixgbevf_adapter *adapter,
355 u32 status_err, struct sk_buff *skb)
356{
357 skb->ip_summed = CHECKSUM_NONE;
358
359 /* Rx csum disabled */
360 if (!(adapter->flags & IXGBE_FLAG_RX_CSUM_ENABLED))
361 return;
362
363 /* if IP and error */
364 if ((status_err & IXGBE_RXD_STAT_IPCS) &&
365 (status_err & IXGBE_RXDADV_ERR_IPE)) {
366 adapter->hw_csum_rx_error++;
367 return;
368 }
369
370 if (!(status_err & IXGBE_RXD_STAT_L4CS))
371 return;
372
373 if (status_err & IXGBE_RXDADV_ERR_TCPE) {
374 adapter->hw_csum_rx_error++;
375 return;
376 }
377
378 /* It must be a TCP or UDP packet with a valid checksum */
379 skb->ip_summed = CHECKSUM_UNNECESSARY;
380 adapter->hw_csum_rx_good++;
381}
382
383/**
384 * ixgbevf_alloc_rx_buffers - Replace used receive buffers; packet split
385 * @adapter: address of board private structure
386 **/
387static void ixgbevf_alloc_rx_buffers(struct ixgbevf_adapter *adapter,
388 struct ixgbevf_ring *rx_ring,
389 int cleaned_count)
390{
391 struct pci_dev *pdev = adapter->pdev;
392 union ixgbe_adv_rx_desc *rx_desc;
393 struct ixgbevf_rx_buffer *bi;
394 struct sk_buff *skb;
395 unsigned int i;
396 unsigned int bufsz = rx_ring->rx_buf_len + NET_IP_ALIGN;
397
398 i = rx_ring->next_to_use;
399 bi = &rx_ring->rx_buffer_info[i];
400
401 while (cleaned_count--) {
402 rx_desc = IXGBE_RX_DESC_ADV(*rx_ring, i);
403
404 if (!bi->page_dma &&
405 (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED)) {
406 if (!bi->page) {
407 bi->page = netdev_alloc_page(adapter->netdev);
408 if (!bi->page) {
409 adapter->alloc_rx_page_failed++;
410 goto no_buffers;
411 }
412 bi->page_offset = 0;
413 } else {
414 /* use a half page if we're re-using */
415 bi->page_offset ^= (PAGE_SIZE / 2);
416 }
417
418 bi->page_dma = pci_map_page(pdev, bi->page,
419 bi->page_offset,
420 (PAGE_SIZE / 2),
421 PCI_DMA_FROMDEVICE);
422 }
423
424 skb = bi->skb;
425 if (!skb) {
426 skb = netdev_alloc_skb(adapter->netdev,
427 bufsz);
428
429 if (!skb) {
430 adapter->alloc_rx_buff_failed++;
431 goto no_buffers;
432 }
433
434 /*
435			 * Make buffer alignment 2 beyond a 16 byte boundary;
436 * this will result in a 16 byte aligned IP header after
437 * the 14 byte MAC header is removed
438 */
439 skb_reserve(skb, NET_IP_ALIGN);
440
441 bi->skb = skb;
442 }
443 if (!bi->dma) {
444 bi->dma = pci_map_single(pdev, skb->data,
445 rx_ring->rx_buf_len,
446 PCI_DMA_FROMDEVICE);
447 }
448 /* Refresh the desc even if buffer_addrs didn't change because
449 * each write-back erases this info. */
450 if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
451 rx_desc->read.pkt_addr = cpu_to_le64(bi->page_dma);
452 rx_desc->read.hdr_addr = cpu_to_le64(bi->dma);
453 } else {
454 rx_desc->read.pkt_addr = cpu_to_le64(bi->dma);
455 }
456
457 i++;
458 if (i == rx_ring->count)
459 i = 0;
460 bi = &rx_ring->rx_buffer_info[i];
461 }
462
463no_buffers:
464 if (rx_ring->next_to_use != i) {
465 rx_ring->next_to_use = i;
466 if (i-- == 0)
467 i = (rx_ring->count - 1);
468
469 ixgbevf_release_rx_desc(&adapter->hw, rx_ring, i);
470 }
471}
472
473static inline void ixgbevf_irq_enable_queues(struct ixgbevf_adapter *adapter,
474 u64 qmask)
475{
476 u32 mask;
477 struct ixgbe_hw *hw = &adapter->hw;
478
479 mask = (qmask & 0xFFFFFFFF);
480 IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);
481}
482
483static inline u16 ixgbevf_get_hdr_info(union ixgbe_adv_rx_desc *rx_desc)
484{
485 return rx_desc->wb.lower.lo_dword.hs_rss.hdr_info;
486}
487
488static inline u16 ixgbevf_get_pkt_info(union ixgbe_adv_rx_desc *rx_desc)
489{
490 return rx_desc->wb.lower.lo_dword.hs_rss.pkt_info;
491}
492
493static bool ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,
494 struct ixgbevf_ring *rx_ring,
495 int *work_done, int work_to_do)
496{
497 struct ixgbevf_adapter *adapter = q_vector->adapter;
498 struct pci_dev *pdev = adapter->pdev;
499 union ixgbe_adv_rx_desc *rx_desc, *next_rxd;
500 struct ixgbevf_rx_buffer *rx_buffer_info, *next_buffer;
501 struct sk_buff *skb;
502 unsigned int i;
503 u32 len, staterr;
504 u16 hdr_info;
505 bool cleaned = false;
506 int cleaned_count = 0;
507 unsigned int total_rx_bytes = 0, total_rx_packets = 0;
508
509 i = rx_ring->next_to_clean;
510 rx_desc = IXGBE_RX_DESC_ADV(*rx_ring, i);
511 staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
512 rx_buffer_info = &rx_ring->rx_buffer_info[i];
513
514 while (staterr & IXGBE_RXD_STAT_DD) {
515 u32 upper_len = 0;
516 if (*work_done >= work_to_do)
517 break;
518 (*work_done)++;
519
520 if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
521 hdr_info = le16_to_cpu(ixgbevf_get_hdr_info(rx_desc));
522 len = (hdr_info & IXGBE_RXDADV_HDRBUFLEN_MASK) >>
523 IXGBE_RXDADV_HDRBUFLEN_SHIFT;
524 if (hdr_info & IXGBE_RXDADV_SPH)
525 adapter->rx_hdr_split++;
526 if (len > IXGBEVF_RX_HDR_SIZE)
527 len = IXGBEVF_RX_HDR_SIZE;
528 upper_len = le16_to_cpu(rx_desc->wb.upper.length);
529 } else {
530 len = le16_to_cpu(rx_desc->wb.upper.length);
531 }
532 cleaned = true;
533 skb = rx_buffer_info->skb;
534 prefetch(skb->data - NET_IP_ALIGN);
535 rx_buffer_info->skb = NULL;
536
537 if (rx_buffer_info->dma) {
538 pci_unmap_single(pdev, rx_buffer_info->dma,
539 rx_ring->rx_buf_len,
540 PCI_DMA_FROMDEVICE);
541 rx_buffer_info->dma = 0;
542 skb_put(skb, len);
543 }
544
545 if (upper_len) {
546 pci_unmap_page(pdev, rx_buffer_info->page_dma,
547 PAGE_SIZE / 2, PCI_DMA_FROMDEVICE);
548 rx_buffer_info->page_dma = 0;
549 skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
550 rx_buffer_info->page,
551 rx_buffer_info->page_offset,
552 upper_len);
553
554 if ((rx_ring->rx_buf_len > (PAGE_SIZE / 2)) ||
555 (page_count(rx_buffer_info->page) != 1))
556 rx_buffer_info->page = NULL;
557 else
558 get_page(rx_buffer_info->page);
559
560 skb->len += upper_len;
561 skb->data_len += upper_len;
562 skb->truesize += upper_len;
563 }
564
565 i++;
566 if (i == rx_ring->count)
567 i = 0;
568
569 next_rxd = IXGBE_RX_DESC_ADV(*rx_ring, i);
570 prefetch(next_rxd);
571 cleaned_count++;
572
573 next_buffer = &rx_ring->rx_buffer_info[i];
574
575 if (!(staterr & IXGBE_RXD_STAT_EOP)) {
576 if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
577 rx_buffer_info->skb = next_buffer->skb;
578 rx_buffer_info->dma = next_buffer->dma;
579 next_buffer->skb = skb;
580 next_buffer->dma = 0;
581 } else {
582 skb->next = next_buffer->skb;
583 skb->next->prev = skb;
584 }
585 adapter->non_eop_descs++;
586 goto next_desc;
587 }
588
589 /* ERR_MASK will only have valid bits if EOP set */
590 if (unlikely(staterr & IXGBE_RXDADV_ERR_FRAME_ERR_MASK)) {
591 dev_kfree_skb_irq(skb);
592 goto next_desc;
593 }
594
595 ixgbevf_rx_checksum(adapter, staterr, skb);
596
597 /* probably a little skewed due to removing CRC */
598 total_rx_bytes += skb->len;
599 total_rx_packets++;
600
601 /*
602 * Work around issue of some types of VM to VM loop back
603 * packets not getting split correctly
604 */
605 if (staterr & IXGBE_RXD_STAT_LB) {
606 u32 header_fixup_len = skb->len - skb->data_len;
607 if (header_fixup_len < 14)
608 skb_push(skb, header_fixup_len);
609 }
610 skb->protocol = eth_type_trans(skb, adapter->netdev);
611
612 ixgbevf_receive_skb(q_vector, skb, staterr, rx_ring, rx_desc);
613 adapter->netdev->last_rx = jiffies;
614
615next_desc:
616 rx_desc->wb.upper.status_error = 0;
617
618 /* return some buffers to hardware, one at a time is too slow */
619 if (cleaned_count >= IXGBEVF_RX_BUFFER_WRITE) {
620 ixgbevf_alloc_rx_buffers(adapter, rx_ring,
621 cleaned_count);
622 cleaned_count = 0;
623 }
624
625 /* use prefetched values */
626 rx_desc = next_rxd;
627 rx_buffer_info = &rx_ring->rx_buffer_info[i];
628
629 staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
630 }
631
632 rx_ring->next_to_clean = i;
633 cleaned_count = IXGBE_DESC_UNUSED(rx_ring);
634
635 if (cleaned_count)
636 ixgbevf_alloc_rx_buffers(adapter, rx_ring, cleaned_count);
637
638 rx_ring->total_packets += total_rx_packets;
639 rx_ring->total_bytes += total_rx_bytes;
640 adapter->net_stats.rx_bytes += total_rx_bytes;
641 adapter->net_stats.rx_packets += total_rx_packets;
642
643 return cleaned;
644}
645
646/**
647 * ixgbevf_clean_rxonly - msix (aka one shot) rx clean routine
648 * @napi: napi struct with our device's info in it
649 * @budget: amount of work driver is allowed to do this pass, in packets
650 *
651 * This function is optimized for cleaning one queue only on a single
652 * q_vector!!!
653 **/
654static int ixgbevf_clean_rxonly(struct napi_struct *napi, int budget)
655{
656 struct ixgbevf_q_vector *q_vector =
657 container_of(napi, struct ixgbevf_q_vector, napi);
658 struct ixgbevf_adapter *adapter = q_vector->adapter;
659 struct ixgbevf_ring *rx_ring = NULL;
660 int work_done = 0;
661 long r_idx;
662
663 r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
664 rx_ring = &(adapter->rx_ring[r_idx]);
665
666 ixgbevf_clean_rx_irq(q_vector, rx_ring, &work_done, budget);
667
668 /* If all Rx work done, exit the polling mode */
669 if (work_done < budget) {
670 napi_complete(napi);
671 if (adapter->itr_setting & 1)
672 ixgbevf_set_itr_msix(q_vector);
673 if (!test_bit(__IXGBEVF_DOWN, &adapter->state))
674 ixgbevf_irq_enable_queues(adapter, rx_ring->v_idx);
675 }
676
677 return work_done;
678}
679
680/**
681 * ixgbevf_clean_rxonly_many - msix (aka one shot) rx clean routine
682 * @napi: napi struct with our device's info in it
683 * @budget: amount of work driver is allowed to do this pass, in packets
684 *
685 * This function will clean more than one rx queue associated with a
686 * q_vector.
687 **/
688static int ixgbevf_clean_rxonly_many(struct napi_struct *napi, int budget)
689{
690 struct ixgbevf_q_vector *q_vector =
691 container_of(napi, struct ixgbevf_q_vector, napi);
692 struct ixgbevf_adapter *adapter = q_vector->adapter;
693 struct ixgbevf_ring *rx_ring = NULL;
694 int work_done = 0, i;
695 long r_idx;
696 u64 enable_mask = 0;
697
698 /* attempt to distribute budget to each queue fairly, but don't allow
699 * the budget to go below 1 because we'll exit polling */
700 budget /= (q_vector->rxr_count ?: 1);
701 budget = max(budget, 1);
702 r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
703 for (i = 0; i < q_vector->rxr_count; i++) {
704 rx_ring = &(adapter->rx_ring[r_idx]);
705 ixgbevf_clean_rx_irq(q_vector, rx_ring, &work_done, budget);
706 enable_mask |= rx_ring->v_idx;
707 r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues,
708 r_idx + 1);
709 }
710
711#ifndef HAVE_NETDEV_NAPI_LIST
712 if (!netif_running(adapter->netdev))
713 work_done = 0;
714
715#endif
716 r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
717 rx_ring = &(adapter->rx_ring[r_idx]);
718
719 /* If all Rx work done, exit the polling mode */
720 if (work_done < budget) {
721 napi_complete(napi);
722 if (adapter->itr_setting & 1)
723 ixgbevf_set_itr_msix(q_vector);
724 if (!test_bit(__IXGBEVF_DOWN, &adapter->state))
725 ixgbevf_irq_enable_queues(adapter, enable_mask);
726 }
727
728 return work_done;
729}
730
731
732/**
733 * ixgbevf_configure_msix - Configure MSI-X hardware
734 * @adapter: board private structure
735 *
736 * ixgbevf_configure_msix sets up the hardware to properly generate MSI-X
737 * interrupts.
738 **/
739static void ixgbevf_configure_msix(struct ixgbevf_adapter *adapter)
740{
741 struct ixgbevf_q_vector *q_vector;
742 struct ixgbe_hw *hw = &adapter->hw;
743 int i, j, q_vectors, v_idx, r_idx;
744 u32 mask;
745
746 q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
747
748 /*
749 * Populate the IVAR table and set the ITR values to the
750 * corresponding register.
751 */
752 for (v_idx = 0; v_idx < q_vectors; v_idx++) {
753 q_vector = adapter->q_vector[v_idx];
754 /* XXX for_each_bit(...) */
755 r_idx = find_first_bit(q_vector->rxr_idx,
756 adapter->num_rx_queues);
757
758 for (i = 0; i < q_vector->rxr_count; i++) {
759 j = adapter->rx_ring[r_idx].reg_idx;
760 ixgbevf_set_ivar(adapter, 0, j, v_idx);
761 r_idx = find_next_bit(q_vector->rxr_idx,
762 adapter->num_rx_queues,
763 r_idx + 1);
764 }
765 r_idx = find_first_bit(q_vector->txr_idx,
766 adapter->num_tx_queues);
767
768 for (i = 0; i < q_vector->txr_count; i++) {
769 j = adapter->tx_ring[r_idx].reg_idx;
770 ixgbevf_set_ivar(adapter, 1, j, v_idx);
771 r_idx = find_next_bit(q_vector->txr_idx,
772 adapter->num_tx_queues,
773 r_idx + 1);
774 }
775
776 /* if this is a tx only vector halve the interrupt rate */
777 if (q_vector->txr_count && !q_vector->rxr_count)
778 q_vector->eitr = (adapter->eitr_param >> 1);
779 else if (q_vector->rxr_count)
780 /* rx only */
781 q_vector->eitr = adapter->eitr_param;
782
783 ixgbevf_write_eitr(adapter, v_idx, q_vector->eitr);
784 }
785
786 ixgbevf_set_ivar(adapter, -1, 1, v_idx);
787
788 /* set up to autoclear timer, and the vectors */
789 mask = IXGBE_EIMS_ENABLE_MASK;
790 mask &= ~IXGBE_EIMS_OTHER;
791 IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, mask);
792}
793
794enum latency_range {
795 lowest_latency = 0,
796 low_latency = 1,
797 bulk_latency = 2,
798 latency_invalid = 255
799};
800
801/**
802 * ixgbevf_update_itr - update the dynamic ITR value based on statistics
803 * @adapter: pointer to adapter
804 * @eitr: eitr setting (ints per sec) to give last timeslice
805 * @itr_setting: current throttle rate in ints/second
806 * @packets: the number of packets during this measurement interval
807 * @bytes: the number of bytes during this measurement interval
808 *
809 * Stores a new ITR value based on packets and byte
810 * counts during the last interrupt. The advantage of per interrupt
811 * computation is faster updates and more accurate ITR for the current
812 * traffic pattern. Constants in this function were computed
813 * based on theoretical maximum wire speed and thresholds were set based
814 * on testing data as well as attempting to minimize response time
815 * while increasing bulk throughput.
816 **/
817static u8 ixgbevf_update_itr(struct ixgbevf_adapter *adapter,
818 u32 eitr, u8 itr_setting,
819 int packets, int bytes)
820{
821 unsigned int retval = itr_setting;
822 u32 timepassed_us;
823 u64 bytes_perint;
824
825 if (packets == 0)
826 goto update_itr_done;
827
828
829	/* simple throttle rate management
830 * 0-20MB/s lowest (100000 ints/s)
831 * 20-100MB/s low (20000 ints/s)
832 * 100-1249MB/s bulk (8000 ints/s)
833 */
834 /* what was last interrupt timeslice? */
835 timepassed_us = 1000000/eitr;
836 bytes_perint = bytes / timepassed_us; /* bytes/usec */
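	/*
	 * Example: at eitr = 8000 ints/s the timeslice is 125 usec, so
	 * 1250 bytes in that slice gives bytes_perint = 10, i.e. roughly
	 * 10 MB/s, the same units (bytes/usec) used by eitr_low/eitr_high
	 * in the comparisons below.
	 */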
837
838 switch (itr_setting) {
839 case lowest_latency:
840 if (bytes_perint > adapter->eitr_low)
841 retval = low_latency;
842 break;
843 case low_latency:
844 if (bytes_perint > adapter->eitr_high)
845 retval = bulk_latency;
846 else if (bytes_perint <= adapter->eitr_low)
847 retval = lowest_latency;
848 break;
849 case bulk_latency:
850 if (bytes_perint <= adapter->eitr_high)
851 retval = low_latency;
852 break;
853 }
854
855update_itr_done:
856 return retval;
857}
858
859/**
860 * ixgbevf_write_eitr - write VTEITR register in hardware specific way
861 * @adapter: pointer to adapter struct
862 * @v_idx: vector index into q_vector array
863 * @itr_reg: new value to be written in *register* format, not ints/s
864 *
865 * This function is made to be called by ethtool and by the driver
866 * when it needs to update VTEITR registers at runtime. Hardware
867 * specific quirks/differences are taken care of here.
868 */
869static void ixgbevf_write_eitr(struct ixgbevf_adapter *adapter, int v_idx,
870 u32 itr_reg)
871{
872 struct ixgbe_hw *hw = &adapter->hw;
873
874 itr_reg = EITR_INTS_PER_SEC_TO_REG(itr_reg);
875
876 /*
877	 * set the WDIS bit so this write does not clear the timer bits
878	 * and cause an immediate assertion of the interrupt
879 */
880 itr_reg |= IXGBE_EITR_CNT_WDIS;
881
882 IXGBE_WRITE_REG(hw, IXGBE_VTEITR(v_idx), itr_reg);
883}
884
885static void ixgbevf_set_itr_msix(struct ixgbevf_q_vector *q_vector)
886{
887 struct ixgbevf_adapter *adapter = q_vector->adapter;
888 u32 new_itr;
889 u8 current_itr, ret_itr;
890 int i, r_idx, v_idx = q_vector->v_idx;
891 struct ixgbevf_ring *rx_ring, *tx_ring;
892
893 r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
894 for (i = 0; i < q_vector->txr_count; i++) {
895 tx_ring = &(adapter->tx_ring[r_idx]);
896 ret_itr = ixgbevf_update_itr(adapter, q_vector->eitr,
897 q_vector->tx_itr,
898 tx_ring->total_packets,
899 tx_ring->total_bytes);
900 /* if the result for this queue would decrease interrupt
901 * rate for this vector then use that result */
902 q_vector->tx_itr = ((q_vector->tx_itr > ret_itr) ?
903 q_vector->tx_itr - 1 : ret_itr);
904 r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues,
905 r_idx + 1);
906 }
907
908 r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
909 for (i = 0; i < q_vector->rxr_count; i++) {
910 rx_ring = &(adapter->rx_ring[r_idx]);
911 ret_itr = ixgbevf_update_itr(adapter, q_vector->eitr,
912 q_vector->rx_itr,
913 rx_ring->total_packets,
914 rx_ring->total_bytes);
915 /* if the result for this queue would decrease interrupt
916 * rate for this vector then use that result */
917 q_vector->rx_itr = ((q_vector->rx_itr > ret_itr) ?
918 q_vector->rx_itr - 1 : ret_itr);
919 r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues,
920 r_idx + 1);
921 }
922
923 current_itr = max(q_vector->rx_itr, q_vector->tx_itr);
924
925 switch (current_itr) {
926 /* counts and packets in update_itr are dependent on these numbers */
927 case lowest_latency:
928 new_itr = 100000;
929 break;
930 case low_latency:
931 new_itr = 20000; /* aka hwitr = ~200 */
932 break;
933 case bulk_latency:
934 default:
935 new_itr = 8000;
936 break;
937 }
938
939 if (new_itr != q_vector->eitr) {
940 u32 itr_reg;
941
942 /* save the algorithm value here, not the smoothed one */
943 q_vector->eitr = new_itr;
944 /* do an exponential smoothing */
945 new_itr = ((q_vector->eitr * 90)/100) + ((new_itr * 10)/100);
946 itr_reg = EITR_INTS_PER_SEC_TO_REG(new_itr);
947 ixgbevf_write_eitr(adapter, v_idx, itr_reg);
948 }
949
950 return;
951}
952
953static irqreturn_t ixgbevf_msix_mbx(int irq, void *data)
954{
955 struct net_device *netdev = data;
956 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
957 struct ixgbe_hw *hw = &adapter->hw;
958 u32 eicr;
959 u32 msg;
960
961 eicr = IXGBE_READ_REG(hw, IXGBE_VTEICS);
962 IXGBE_WRITE_REG(hw, IXGBE_VTEICR, eicr);
963
964 hw->mbx.ops.read(hw, &msg, 1);
965
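	/* a control message from the PF reschedules the watchdog almost
	 * immediately so link state and reset handling catch up quickly */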
966 if ((msg & IXGBE_MBVFICR_VFREQ_MASK) == IXGBE_PF_CONTROL_MSG)
967 mod_timer(&adapter->watchdog_timer,
968 round_jiffies(jiffies + 10));
969
970 return IRQ_HANDLED;
971}
972
973static irqreturn_t ixgbevf_msix_clean_tx(int irq, void *data)
974{
975 struct ixgbevf_q_vector *q_vector = data;
976 struct ixgbevf_adapter *adapter = q_vector->adapter;
977 struct ixgbevf_ring *tx_ring;
978 int i, r_idx;
979
980 if (!q_vector->txr_count)
981 return IRQ_HANDLED;
982
983 r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
984 for (i = 0; i < q_vector->txr_count; i++) {
985 tx_ring = &(adapter->tx_ring[r_idx]);
986 tx_ring->total_bytes = 0;
987 tx_ring->total_packets = 0;
988 ixgbevf_clean_tx_irq(adapter, tx_ring);
989 r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues,
990 r_idx + 1);
991 }
992
993 if (adapter->itr_setting & 1)
994 ixgbevf_set_itr_msix(q_vector);
995
996 return IRQ_HANDLED;
997}
998
999/**
1000 * ixgbevf_msix_clean_rx - single unshared vector rx clean (all queues)
1001 * @irq: unused
1002 * @data: pointer to our q_vector struct for this interrupt vector
1003 **/
1004static irqreturn_t ixgbevf_msix_clean_rx(int irq, void *data)
1005{
1006 struct ixgbevf_q_vector *q_vector = data;
1007 struct ixgbevf_adapter *adapter = q_vector->adapter;
1008 struct ixgbe_hw *hw = &adapter->hw;
1009 struct ixgbevf_ring *rx_ring;
1010 int r_idx;
1011 int i;
1012
1013 r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
1014 for (i = 0; i < q_vector->rxr_count; i++) {
1015 rx_ring = &(adapter->rx_ring[r_idx]);
1016 rx_ring->total_bytes = 0;
1017 rx_ring->total_packets = 0;
1018 r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues,
1019 r_idx + 1);
1020 }
1021
1022 if (!q_vector->rxr_count)
1023 return IRQ_HANDLED;
1024
1025 r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
1026 rx_ring = &(adapter->rx_ring[r_idx]);
1027 /* disable interrupts on this vector only */
1028 IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, rx_ring->v_idx);
1029 napi_schedule(&q_vector->napi);
1030
1031
1032 return IRQ_HANDLED;
1033}
1034
1035static irqreturn_t ixgbevf_msix_clean_many(int irq, void *data)
1036{
1037 ixgbevf_msix_clean_rx(irq, data);
1038 ixgbevf_msix_clean_tx(irq, data);
1039
1040 return IRQ_HANDLED;
1041}
1042
1043static inline void map_vector_to_rxq(struct ixgbevf_adapter *a, int v_idx,
1044 int r_idx)
1045{
1046 struct ixgbevf_q_vector *q_vector = a->q_vector[v_idx];
1047
1048 set_bit(r_idx, q_vector->rxr_idx);
1049 q_vector->rxr_count++;
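	/* v_idx is stored as a one-hot mask so it can be written directly
	 * into the VTEIMC interrupt mask register (see
	 * ixgbevf_msix_clean_rx) */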
1050 a->rx_ring[r_idx].v_idx = 1 << v_idx;
1051}
1052
1053static inline void map_vector_to_txq(struct ixgbevf_adapter *a, int v_idx,
1054 int t_idx)
1055{
1056 struct ixgbevf_q_vector *q_vector = a->q_vector[v_idx];
1057
1058 set_bit(t_idx, q_vector->txr_idx);
1059 q_vector->txr_count++;
1060 a->tx_ring[t_idx].v_idx = 1 << v_idx;
1061}
1062
1063/**
1064 * ixgbevf_map_rings_to_vectors - Maps descriptor rings to vectors
1065 * @adapter: board private structure to initialize
1066 *
1067 * This function maps descriptor rings to the queue-specific vectors
1068 * we were allotted through the MSI-X enabling code. Ideally, we'd have
1069 * one vector per ring/queue, but on a constrained vector budget, we
1070 * group the rings as "efficiently" as possible. You would add new
1071 * mapping configurations in here.
1072 **/
1073static int ixgbevf_map_rings_to_vectors(struct ixgbevf_adapter *adapter)
1074{
1075 int q_vectors;
1076 int v_start = 0;
1077 int rxr_idx = 0, txr_idx = 0;
1078 int rxr_remaining = adapter->num_rx_queues;
1079 int txr_remaining = adapter->num_tx_queues;
1080 int i, j;
1081 int rqpv, tqpv;
1082 int err = 0;
1083
1084 q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
1085
1086 /*
1087 * The ideal configuration...
1088 * We have enough vectors to map one per queue.
1089 */
1090 if (q_vectors == adapter->num_rx_queues + adapter->num_tx_queues) {
1091 for (; rxr_idx < rxr_remaining; v_start++, rxr_idx++)
1092 map_vector_to_rxq(adapter, v_start, rxr_idx);
1093
1094 for (; txr_idx < txr_remaining; v_start++, txr_idx++)
1095 map_vector_to_txq(adapter, v_start, txr_idx);
1096 goto out;
1097 }
1098
1099 /*
1100 * If we don't have enough vectors for a 1-to-1
1101 * mapping, we'll have to group them so there are
1102 * multiple queues per vector.
1103 */
1104 /* Re-adjusting *qpv takes care of the remainder. */
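	/*
	 * Example: 3 Rx queues shared by 2 vectors gives
	 * vector 0 -> DIV_ROUND_UP(3, 2) = 2 queues and
	 * vector 1 -> DIV_ROUND_UP(1, 1) = 1 queue.
	 */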
1105 for (i = v_start; i < q_vectors; i++) {
1106 rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - i);
1107 for (j = 0; j < rqpv; j++) {
1108 map_vector_to_rxq(adapter, i, rxr_idx);
1109 rxr_idx++;
1110 rxr_remaining--;
1111 }
1112 }
1113 for (i = v_start; i < q_vectors; i++) {
1114 tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - i);
1115 for (j = 0; j < tqpv; j++) {
1116 map_vector_to_txq(adapter, i, txr_idx);
1117 txr_idx++;
1118 txr_remaining--;
1119 }
1120 }
1121
1122out:
1123 return err;
1124}
1125
1126/**
1127 * ixgbevf_request_msix_irqs - Initialize MSI-X interrupts
1128 * @adapter: board private structure
1129 *
1130 * ixgbevf_request_msix_irqs allocates MSI-X vectors and requests
1131 * interrupts from the kernel.
1132 **/
1133static int ixgbevf_request_msix_irqs(struct ixgbevf_adapter *adapter)
1134{
1135 struct net_device *netdev = adapter->netdev;
1136 irqreturn_t (*handler)(int, void *);
1137 int i, vector, q_vectors, err;
1138 int ri = 0, ti = 0;
1139
1140 /* Decrement for Other and TCP Timer vectors */
1141 q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
1142
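/*
 * SET_HANDLER picks the ISR for a vector: combined Tx/Rx cleanup when both
 * ring types are mapped to it, Rx-only or Tx-only otherwise, and NULL when
 * the vector has no rings at all (such vectors are skipped below).
 */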
1143#define SET_HANDLER(_v) (((_v)->rxr_count && (_v)->txr_count) \
1144 ? &ixgbevf_msix_clean_many : \
1145 (_v)->rxr_count ? &ixgbevf_msix_clean_rx : \
1146 (_v)->txr_count ? &ixgbevf_msix_clean_tx : \
1147 NULL)
1148 for (vector = 0; vector < q_vectors; vector++) {
1149 handler = SET_HANDLER(adapter->q_vector[vector]);
1150
1151 if (handler == &ixgbevf_msix_clean_rx) {
1152 sprintf(adapter->name[vector], "%s-%s-%d",
1153 netdev->name, "rx", ri++);
1154 } else if (handler == &ixgbevf_msix_clean_tx) {
1155 sprintf(adapter->name[vector], "%s-%s-%d",
1156 netdev->name, "tx", ti++);
1157 } else if (handler == &ixgbevf_msix_clean_many) {
1158 sprintf(adapter->name[vector], "%s-%s-%d",
1159 netdev->name, "TxRx", vector);
1160 } else {
1161 /* skip this unused q_vector */
1162 continue;
1163 }
1164 err = request_irq(adapter->msix_entries[vector].vector,
1165 handler, 0, adapter->name[vector],
1166 adapter->q_vector[vector]);
1167 if (err) {
1168 hw_dbg(&adapter->hw,
1169			       "request_irq failed for MSI-X interrupt, "
1170			       "Error: %d\n", err);
1171 goto free_queue_irqs;
1172 }
1173 }
1174
1175 sprintf(adapter->name[vector], "%s:mbx", netdev->name);
1176 err = request_irq(adapter->msix_entries[vector].vector,
1177 &ixgbevf_msix_mbx, 0, adapter->name[vector], netdev);
1178 if (err) {
1179 hw_dbg(&adapter->hw,
1180 "request_irq for msix_mbx failed: %d\n", err);
1181 goto free_queue_irqs;
1182 }
1183
1184 return 0;
1185
1186free_queue_irqs:
1187 for (i = vector - 1; i >= 0; i--)
1188		free_irq(adapter->msix_entries[i].vector,
1189			 adapter->q_vector[i]);
1190 pci_disable_msix(adapter->pdev);
1191 kfree(adapter->msix_entries);
1192 adapter->msix_entries = NULL;
1193 return err;
1194}
1195
1196static inline void ixgbevf_reset_q_vectors(struct ixgbevf_adapter *adapter)
1197{
1198 int i, q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
1199
1200 for (i = 0; i < q_vectors; i++) {
1201 struct ixgbevf_q_vector *q_vector = adapter->q_vector[i];
1202 bitmap_zero(q_vector->rxr_idx, MAX_RX_QUEUES);
1203 bitmap_zero(q_vector->txr_idx, MAX_TX_QUEUES);
1204 q_vector->rxr_count = 0;
1205 q_vector->txr_count = 0;
1206 q_vector->eitr = adapter->eitr_param;
1207 }
1208}
1209
1210/**
1211 * ixgbevf_request_irq - initialize interrupts
1212 * @adapter: board private structure
1213 *
1214 * Attempts to configure interrupts using the best available
1215 * capabilities of the hardware and kernel.
1216 **/
1217static int ixgbevf_request_irq(struct ixgbevf_adapter *adapter)
1218{
1219 int err = 0;
1220
1221 err = ixgbevf_request_msix_irqs(adapter);
1222
1223 if (err)
1224 hw_dbg(&adapter->hw,
1225 "request_irq failed, Error %d\n", err);
1226
1227 return err;
1228}
1229
1230static void ixgbevf_free_irq(struct ixgbevf_adapter *adapter)
1231{
1232 struct net_device *netdev = adapter->netdev;
1233 int i, q_vectors;
1234
1235 q_vectors = adapter->num_msix_vectors;
1236
1237 i = q_vectors - 1;
1238
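	/* the last entry is the mailbox vector, which was requested with
	 * the netdev as its dev_id */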
1239 free_irq(adapter->msix_entries[i].vector, netdev);
1240 i--;
1241
1242 for (; i >= 0; i--) {
1243 free_irq(adapter->msix_entries[i].vector,
1244 adapter->q_vector[i]);
1245 }
1246
1247 ixgbevf_reset_q_vectors(adapter);
1248}
1249
1250/**
1251 * ixgbevf_irq_disable - Mask off interrupt generation on the NIC
1252 * @adapter: board private structure
1253 **/
1254static inline void ixgbevf_irq_disable(struct ixgbevf_adapter *adapter)
1255{
1256 int i;
1257 struct ixgbe_hw *hw = &adapter->hw;
1258
1259 IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, ~0);
1260
1261 IXGBE_WRITE_FLUSH(hw);
1262
1263 for (i = 0; i < adapter->num_msix_vectors; i++)
1264 synchronize_irq(adapter->msix_entries[i].vector);
1265}
1266
1267/**
1268 * ixgbevf_irq_enable - Enable default interrupt generation settings
1269 * @adapter: board private structure
1270 **/
1271static inline void ixgbevf_irq_enable(struct ixgbevf_adapter *adapter,
1272 bool queues, bool flush)
1273{
1274 struct ixgbe_hw *hw = &adapter->hw;
1275 u32 mask;
1276 u64 qmask;
1277
1278 mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);
1279 qmask = ~0;
1280
1281 IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);
1282
1283 if (queues)
1284 ixgbevf_irq_enable_queues(adapter, qmask);
1285
1286 if (flush)
1287 IXGBE_WRITE_FLUSH(hw);
1288}
1289
1290/**
1291 * ixgbevf_configure_tx - Configure 82599 VF Transmit Unit after Reset
1292 * @adapter: board private structure
1293 *
1294 * Configure the Tx unit of the MAC after a reset.
1295 **/
1296static void ixgbevf_configure_tx(struct ixgbevf_adapter *adapter)
1297{
1298 u64 tdba;
1299 struct ixgbe_hw *hw = &adapter->hw;
1300 u32 i, j, tdlen, txctrl;
1301
1302 /* Setup the HW Tx Head and Tail descriptor pointers */
1303 for (i = 0; i < adapter->num_tx_queues; i++) {
1304 struct ixgbevf_ring *ring = &adapter->tx_ring[i];
1305 j = ring->reg_idx;
1306 tdba = ring->dma;
1307 tdlen = ring->count * sizeof(union ixgbe_adv_tx_desc);
1308 IXGBE_WRITE_REG(hw, IXGBE_VFTDBAL(j),
1309 (tdba & DMA_BIT_MASK(32)));
1310 IXGBE_WRITE_REG(hw, IXGBE_VFTDBAH(j), (tdba >> 32));
1311 IXGBE_WRITE_REG(hw, IXGBE_VFTDLEN(j), tdlen);
1312 IXGBE_WRITE_REG(hw, IXGBE_VFTDH(j), 0);
1313 IXGBE_WRITE_REG(hw, IXGBE_VFTDT(j), 0);
1314 adapter->tx_ring[i].head = IXGBE_VFTDH(j);
1315 adapter->tx_ring[i].tail = IXGBE_VFTDT(j);
1316 /* Disable Tx Head Writeback RO bit, since this hoses
1317 * bookkeeping if things aren't delivered in order.
1318 */
1319 txctrl = IXGBE_READ_REG(hw, IXGBE_VFDCA_TXCTRL(j));
1320 txctrl &= ~IXGBE_DCA_TXCTRL_TX_WB_RO_EN;
1321 IXGBE_WRITE_REG(hw, IXGBE_VFDCA_TXCTRL(j), txctrl);
1322 }
1323}
1324
1325#define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT 2
1326
1327static void ixgbevf_configure_srrctl(struct ixgbevf_adapter *adapter, int index)
1328{
1329 struct ixgbevf_ring *rx_ring;
1330 struct ixgbe_hw *hw = &adapter->hw;
1331 u32 srrctl;
1332
1333 rx_ring = &adapter->rx_ring[index];
1334
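	/* ask the hardware to drop packets when this queue has no free Rx
	 * descriptors (SRRCTL.DROP_EN) */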
1335 srrctl = IXGBE_SRRCTL_DROP_EN;
1336
1337 if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
1338 u16 bufsz = IXGBEVF_RXBUFFER_2048;
1339 /* grow the amount we can receive on large page machines */
1340 if (bufsz < (PAGE_SIZE / 2))
1341 bufsz = (PAGE_SIZE / 2);
1342 /* cap the bufsz at our largest descriptor size */
1343 bufsz = min((u16)IXGBEVF_MAX_RXBUFFER, bufsz);
1344
1345 srrctl |= bufsz >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
1346 srrctl |= IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
1347 srrctl |= ((IXGBEVF_RX_HDR_SIZE <<
1348 IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT) &
1349 IXGBE_SRRCTL_BSIZEHDR_MASK);
1350 } else {
1351 srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
1352
1353 if (rx_ring->rx_buf_len == MAXIMUM_ETHERNET_VLAN_SIZE)
1354 srrctl |= IXGBEVF_RXBUFFER_2048 >>
1355 IXGBE_SRRCTL_BSIZEPKT_SHIFT;
1356 else
1357 srrctl |= rx_ring->rx_buf_len >>
1358 IXGBE_SRRCTL_BSIZEPKT_SHIFT;
1359 }
1360 IXGBE_WRITE_REG(hw, IXGBE_VFSRRCTL(index), srrctl);
1361}
1362
1363/**
1364 * ixgbevf_configure_rx - Configure 82599 VF Receive Unit after Reset
1365 * @adapter: board private structure
1366 *
1367 * Configure the Rx unit of the MAC after a reset.
1368 **/
1369static void ixgbevf_configure_rx(struct ixgbevf_adapter *adapter)
1370{
1371 u64 rdba;
1372 struct ixgbe_hw *hw = &adapter->hw;
1373 struct net_device *netdev = adapter->netdev;
1374 int max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
1375 int i, j;
1376 u32 rdlen;
1377 int rx_buf_len;
1378
1379 /* Decide whether to use packet split mode or not */
1380 if (netdev->mtu > ETH_DATA_LEN) {
1381 if (adapter->flags & IXGBE_FLAG_RX_PS_CAPABLE)
1382 adapter->flags |= IXGBE_FLAG_RX_PS_ENABLED;
1383 else
1384 adapter->flags &= ~IXGBE_FLAG_RX_PS_ENABLED;
1385 } else {
1386 if (adapter->flags & IXGBE_FLAG_RX_1BUF_CAPABLE)
1387 adapter->flags &= ~IXGBE_FLAG_RX_PS_ENABLED;
1388 else
1389 adapter->flags |= IXGBE_FLAG_RX_PS_ENABLED;
1390 }
1391
1392 /* Set the RX buffer length according to the mode */
1393 if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
1394 /* PSRTYPE must be initialized in 82599 */
1395 u32 psrtype = IXGBE_PSRTYPE_TCPHDR |
1396 IXGBE_PSRTYPE_UDPHDR |
1397 IXGBE_PSRTYPE_IPV4HDR |
1398 IXGBE_PSRTYPE_IPV6HDR |
1399 IXGBE_PSRTYPE_L2HDR;
1400 IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, psrtype);
1401 rx_buf_len = IXGBEVF_RX_HDR_SIZE;
1402 } else {
1403 IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, 0);
1404 if (netdev->mtu <= ETH_DATA_LEN)
1405 rx_buf_len = MAXIMUM_ETHERNET_VLAN_SIZE;
1406 else
1407 rx_buf_len = ALIGN(max_frame, 1024);
1408 }
1409
1410 rdlen = adapter->rx_ring[0].count * sizeof(union ixgbe_adv_rx_desc);
1411 /* Setup the HW Rx Head and Tail Descriptor Pointers and
1412 * the Base and Length of the Rx Descriptor Ring */
1413 for (i = 0; i < adapter->num_rx_queues; i++) {
1414 rdba = adapter->rx_ring[i].dma;
1415 j = adapter->rx_ring[i].reg_idx;
1416 IXGBE_WRITE_REG(hw, IXGBE_VFRDBAL(j),
1417 (rdba & DMA_BIT_MASK(32)));
1418 IXGBE_WRITE_REG(hw, IXGBE_VFRDBAH(j), (rdba >> 32));
1419 IXGBE_WRITE_REG(hw, IXGBE_VFRDLEN(j), rdlen);
1420 IXGBE_WRITE_REG(hw, IXGBE_VFRDH(j), 0);
1421 IXGBE_WRITE_REG(hw, IXGBE_VFRDT(j), 0);
1422 adapter->rx_ring[i].head = IXGBE_VFRDH(j);
1423 adapter->rx_ring[i].tail = IXGBE_VFRDT(j);
1424 adapter->rx_ring[i].rx_buf_len = rx_buf_len;
1425
1426 ixgbevf_configure_srrctl(adapter, j);
1427 }
1428}
1429
1430static void ixgbevf_vlan_rx_register(struct net_device *netdev,
1431 struct vlan_group *grp)
1432{
1433 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
1434 struct ixgbe_hw *hw = &adapter->hw;
1435 int i, j;
1436 u32 ctrl;
1437
1438 adapter->vlgrp = grp;
1439
1440 for (i = 0; i < adapter->num_rx_queues; i++) {
1441 j = adapter->rx_ring[i].reg_idx;
1442 ctrl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(j));
1443 ctrl |= IXGBE_RXDCTL_VME;
1444 IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(j), ctrl);
1445 }
1446}
1447
1448static void ixgbevf_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
1449{
1450 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
1451 struct ixgbe_hw *hw = &adapter->hw;
1452 struct net_device *v_netdev;
1453
1454 /* add VID to filter table */
1455 if (hw->mac.ops.set_vfta)
1456 hw->mac.ops.set_vfta(hw, vid, 0, true);
1457 /*
1458 * Copy feature flags from netdev to the vlan netdev for this vid.
1459 * This allows things like TSO to bubble down to our vlan device.
1460 */
1461 v_netdev = vlan_group_get_device(adapter->vlgrp, vid);
1462 v_netdev->features |= adapter->netdev->features;
1463 vlan_group_set_device(adapter->vlgrp, vid, v_netdev);
1464}
1465
1466static void ixgbevf_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
1467{
1468 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
1469 struct ixgbe_hw *hw = &adapter->hw;
1470
1471 if (!test_bit(__IXGBEVF_DOWN, &adapter->state))
1472 ixgbevf_irq_disable(adapter);
1473
1474 vlan_group_set_device(adapter->vlgrp, vid, NULL);
1475
1476 if (!test_bit(__IXGBEVF_DOWN, &adapter->state))
1477 ixgbevf_irq_enable(adapter, true, true);
1478
1479 /* remove VID from filter table */
1480 if (hw->mac.ops.set_vfta)
1481 hw->mac.ops.set_vfta(hw, vid, 0, false);
1482}
1483
1484static void ixgbevf_restore_vlan(struct ixgbevf_adapter *adapter)
1485{
1486 ixgbevf_vlan_rx_register(adapter->netdev, adapter->vlgrp);
1487
1488 if (adapter->vlgrp) {
1489 u16 vid;
1490 for (vid = 0; vid < VLAN_GROUP_ARRAY_LEN; vid++) {
1491 if (!vlan_group_get_device(adapter->vlgrp, vid))
1492 continue;
1493 ixgbevf_vlan_rx_add_vid(adapter->netdev, vid);
1494 }
1495 }
1496}
1497
1498static u8 *ixgbevf_addr_list_itr(struct ixgbe_hw *hw, u8 **mc_addr_ptr,
1499 u32 *vmdq)
1500{
1501 struct dev_mc_list *mc_ptr;
1502 u8 *addr = *mc_addr_ptr;
1503 *vmdq = 0;
1504
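	/*
	 * addr points into the dmi_addr[] field of a struct dev_mc_list,
	 * so container_of() recovers the enclosing list entry and the next
	 * call can return the following multicast address.
	 */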
1505 mc_ptr = container_of(addr, struct dev_mc_list, dmi_addr[0]);
1506 if (mc_ptr->next)
1507 *mc_addr_ptr = mc_ptr->next->dmi_addr;
1508 else
1509 *mc_addr_ptr = NULL;
1510
1511 return addr;
1512}
1513
1514/**
1515 * ixgbevf_set_rx_mode - Multicast set
1516 * @netdev: network interface device structure
1517 *
1518 * The set_rx_mode entry point is called whenever the multicast address
1519 * list or the network interface flags are updated. This routine is
1520 * responsible for configuring the hardware for proper multicast mode.
1521 **/
1522static void ixgbevf_set_rx_mode(struct net_device *netdev)
1523{
1524 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
1525 struct ixgbe_hw *hw = &adapter->hw;
1526 u8 *addr_list = NULL;
1527 int addr_count = 0;
1528
1529 /* reprogram multicast list */
1530 addr_count = netdev->mc_count;
1531 if (addr_count)
1532 addr_list = netdev->mc_list->dmi_addr;
1533 if (hw->mac.ops.update_mc_addr_list)
1534 hw->mac.ops.update_mc_addr_list(hw, addr_list, addr_count,
1535 ixgbevf_addr_list_itr);
1536}
1537
1538static void ixgbevf_napi_enable_all(struct ixgbevf_adapter *adapter)
1539{
1540 int q_idx;
1541 struct ixgbevf_q_vector *q_vector;
1542 int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
1543
1544 for (q_idx = 0; q_idx < q_vectors; q_idx++) {
1545 struct napi_struct *napi;
1546 q_vector = adapter->q_vector[q_idx];
1547 if (!q_vector->rxr_count)
1548 continue;
1549 napi = &q_vector->napi;
1550 if (q_vector->rxr_count > 1)
1551 napi->poll = &ixgbevf_clean_rxonly_many;
1552
1553 napi_enable(napi);
1554 }
1555}
1556
1557static void ixgbevf_napi_disable_all(struct ixgbevf_adapter *adapter)
1558{
1559 int q_idx;
1560 struct ixgbevf_q_vector *q_vector;
1561 int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
1562
1563 for (q_idx = 0; q_idx < q_vectors; q_idx++) {
1564 q_vector = adapter->q_vector[q_idx];
1565 if (!q_vector->rxr_count)
1566 continue;
1567 napi_disable(&q_vector->napi);
1568 }
1569}
1570
1571static void ixgbevf_configure(struct ixgbevf_adapter *adapter)
1572{
1573 struct net_device *netdev = adapter->netdev;
1574 int i;
1575
1576 ixgbevf_set_rx_mode(netdev);
1577
1578 ixgbevf_restore_vlan(adapter);
1579
1580 ixgbevf_configure_tx(adapter);
1581 ixgbevf_configure_rx(adapter);
1582 for (i = 0; i < adapter->num_rx_queues; i++) {
1583 struct ixgbevf_ring *ring = &adapter->rx_ring[i];
1584 ixgbevf_alloc_rx_buffers(adapter, ring, ring->count);
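		/*
		 * Follow the usual "leave one descriptor unused" convention:
		 * tail is left at count - 1 so head == tail always reads as
		 * an empty ring rather than a full one.
		 */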
1585 ring->next_to_use = ring->count - 1;
1586 writel(ring->next_to_use, adapter->hw.hw_addr + ring->tail);
1587 }
1588}
1589
1590#define IXGBE_MAX_RX_DESC_POLL 10
1591static inline void ixgbevf_rx_desc_queue_enable(struct ixgbevf_adapter *adapter,
1592 int rxr)
1593{
1594 struct ixgbe_hw *hw = &adapter->hw;
1595 int j = adapter->rx_ring[rxr].reg_idx;
1596 int k;
1597
1598 for (k = 0; k < IXGBE_MAX_RX_DESC_POLL; k++) {
1599 if (IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(j)) & IXGBE_RXDCTL_ENABLE)
1600 break;
1601 else
1602 msleep(1);
1603 }
1604 if (k >= IXGBE_MAX_RX_DESC_POLL) {
1605 hw_dbg(hw, "RXDCTL.ENABLE on Rx queue %d "
1606 "not set within the polling period\n", rxr);
1607 }
1608
1609 ixgbevf_release_rx_desc(&adapter->hw, &adapter->rx_ring[rxr],
1610 (adapter->rx_ring[rxr].count - 1));
1611}
1612
1613static int ixgbevf_up_complete(struct ixgbevf_adapter *adapter)
1614{
1615 struct net_device *netdev = adapter->netdev;
1616 struct ixgbe_hw *hw = &adapter->hw;
1617 int i, j = 0;
1618 int num_rx_rings = adapter->num_rx_queues;
1619 u32 txdctl, rxdctl;
1620
1621 for (i = 0; i < adapter->num_tx_queues; i++) {
1622 j = adapter->tx_ring[i].reg_idx;
1623 txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(j));
1624 /* enable WTHRESH=8 descriptors, to encourage burst writeback */
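		/* WTHRESH occupies bits 22:16 of TXDCTL, hence the shift by 16 */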
1625 txdctl |= (8 << 16);
1626 IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(j), txdctl);
1627 }
1628
1629 for (i = 0; i < adapter->num_tx_queues; i++) {
1630 j = adapter->tx_ring[i].reg_idx;
1631 txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(j));
1632 txdctl |= IXGBE_TXDCTL_ENABLE;
1633 IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(j), txdctl);
1634 }
1635
1636 for (i = 0; i < num_rx_rings; i++) {
1637 j = adapter->rx_ring[i].reg_idx;
1638 rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(j));
1639 rxdctl |= IXGBE_RXDCTL_ENABLE;
1640 IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(j), rxdctl);
1641 ixgbevf_rx_desc_queue_enable(adapter, i);
1642 }
1643
1644 ixgbevf_configure_msix(adapter);
1645
1646 if (hw->mac.ops.set_rar) {
1647 if (is_valid_ether_addr(hw->mac.addr))
1648 hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0);
1649 else
1650 hw->mac.ops.set_rar(hw, 0, hw->mac.perm_addr, 0);
1651 }
1652
1653 clear_bit(__IXGBEVF_DOWN, &adapter->state);
1654 ixgbevf_napi_enable_all(adapter);
1655
1656 /* enable transmits */
1657 netif_tx_start_all_queues(netdev);
1658
1659	/* bring the link up in the watchdog; this could race with our first
1660	 * link-up interrupt but shouldn't be a problem */
1661 adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
1662 adapter->link_check_timeout = jiffies;
1663 mod_timer(&adapter->watchdog_timer, jiffies);
1664 return 0;
1665}
1666
1667int ixgbevf_up(struct ixgbevf_adapter *adapter)
1668{
1669 int err;
1670 struct ixgbe_hw *hw = &adapter->hw;
1671
1672 ixgbevf_configure(adapter);
1673
1674 err = ixgbevf_up_complete(adapter);
1675
1676 /* clear any pending interrupts, may auto mask */
1677 IXGBE_READ_REG(hw, IXGBE_VTEICR);
1678
1679 ixgbevf_irq_enable(adapter, true, true);
1680
1681 return err;
1682}
1683
1684/**
1685 * ixgbevf_clean_rx_ring - Free Rx Buffers per Queue
1686 * @adapter: board private structure
1687 * @rx_ring: ring to free buffers from
1688 **/
1689static void ixgbevf_clean_rx_ring(struct ixgbevf_adapter *adapter,
1690 struct ixgbevf_ring *rx_ring)
1691{
1692 struct pci_dev *pdev = adapter->pdev;
1693 unsigned long size;
1694 unsigned int i;
1695
1696 if (!rx_ring->rx_buffer_info)
1697 return;
1698
1699 /* Free all the Rx ring sk_buffs */
1700 for (i = 0; i < rx_ring->count; i++) {
1701 struct ixgbevf_rx_buffer *rx_buffer_info;
1702
1703 rx_buffer_info = &rx_ring->rx_buffer_info[i];
1704 if (rx_buffer_info->dma) {
1705 pci_unmap_single(pdev, rx_buffer_info->dma,
1706 rx_ring->rx_buf_len,
1707 PCI_DMA_FROMDEVICE);
1708 rx_buffer_info->dma = 0;
1709 }
1710 if (rx_buffer_info->skb) {
1711 struct sk_buff *skb = rx_buffer_info->skb;
1712 rx_buffer_info->skb = NULL;
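			/* frames that spanned multiple buffers are chained
			 * through skb->prev; free the whole chain */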
1713 do {
1714 struct sk_buff *this = skb;
1715 skb = skb->prev;
1716 dev_kfree_skb(this);
1717 } while (skb);
1718 }
1719 if (!rx_buffer_info->page)
1720 continue;
1721 pci_unmap_page(pdev, rx_buffer_info->page_dma, PAGE_SIZE / 2,
1722 PCI_DMA_FROMDEVICE);
1723 rx_buffer_info->page_dma = 0;
1724 put_page(rx_buffer_info->page);
1725 rx_buffer_info->page = NULL;
1726 rx_buffer_info->page_offset = 0;
1727 }
1728
1729 size = sizeof(struct ixgbevf_rx_buffer) * rx_ring->count;
1730 memset(rx_ring->rx_buffer_info, 0, size);
1731
1732 /* Zero out the descriptor ring */
1733 memset(rx_ring->desc, 0, rx_ring->size);
1734
1735 rx_ring->next_to_clean = 0;
1736 rx_ring->next_to_use = 0;
1737
1738 if (rx_ring->head)
1739 writel(0, adapter->hw.hw_addr + rx_ring->head);
1740 if (rx_ring->tail)
1741 writel(0, adapter->hw.hw_addr + rx_ring->tail);
1742}
1743
1744/**
1745 * ixgbevf_clean_tx_ring - Free Tx Buffers
1746 * @adapter: board private structure
1747 * @tx_ring: ring to be cleaned
1748 **/
1749static void ixgbevf_clean_tx_ring(struct ixgbevf_adapter *adapter,
1750 struct ixgbevf_ring *tx_ring)
1751{
1752 struct ixgbevf_tx_buffer *tx_buffer_info;
1753 unsigned long size;
1754 unsigned int i;
1755
1756 if (!tx_ring->tx_buffer_info)
1757 return;
1758
1759 /* Free all the Tx ring sk_buffs */
1760
1761 for (i = 0; i < tx_ring->count; i++) {
1762 tx_buffer_info = &tx_ring->tx_buffer_info[i];
1763 ixgbevf_unmap_and_free_tx_resource(adapter, tx_buffer_info);
1764 }
1765
1766 size = sizeof(struct ixgbevf_tx_buffer) * tx_ring->count;
1767 memset(tx_ring->tx_buffer_info, 0, size);
1768
1769 memset(tx_ring->desc, 0, tx_ring->size);
1770
1771 tx_ring->next_to_use = 0;
1772 tx_ring->next_to_clean = 0;
1773
1774 if (tx_ring->head)
1775 writel(0, adapter->hw.hw_addr + tx_ring->head);
1776 if (tx_ring->tail)
1777 writel(0, adapter->hw.hw_addr + tx_ring->tail);
1778}
1779
1780/**
1781 * ixgbevf_clean_all_rx_rings - Free Rx Buffers for all queues
1782 * @adapter: board private structure
1783 **/
1784static void ixgbevf_clean_all_rx_rings(struct ixgbevf_adapter *adapter)
1785{
1786 int i;
1787
1788 for (i = 0; i < adapter->num_rx_queues; i++)
1789 ixgbevf_clean_rx_ring(adapter, &adapter->rx_ring[i]);
1790}
1791
1792/**
1793 * ixgbevf_clean_all_tx_rings - Free Tx Buffers for all queues
1794 * @adapter: board private structure
1795 **/
1796static void ixgbevf_clean_all_tx_rings(struct ixgbevf_adapter *adapter)
1797{
1798 int i;
1799
1800 for (i = 0; i < adapter->num_tx_queues; i++)
1801 ixgbevf_clean_tx_ring(adapter, &adapter->tx_ring[i]);
1802}
1803
1804void ixgbevf_down(struct ixgbevf_adapter *adapter)
1805{
1806 struct net_device *netdev = adapter->netdev;
1807 struct ixgbe_hw *hw = &adapter->hw;
1808 u32 txdctl;
1809 int i, j;
1810
1811 /* signal that we are down to the interrupt handler */
1812 set_bit(__IXGBEVF_DOWN, &adapter->state);
1813	/* stop the stack from handing us new transmits */
1814
1815 netif_tx_disable(netdev);
1816
1817 msleep(10);
1818
1819 netif_tx_stop_all_queues(netdev);
1820
1821 ixgbevf_irq_disable(adapter);
1822
1823 ixgbevf_napi_disable_all(adapter);
1824
1825 del_timer_sync(&adapter->watchdog_timer);
1826 /* can't call flush scheduled work here because it can deadlock
1827 * if linkwatch_event tries to acquire the rtnl_lock which we are
1828 * holding */
1829 while (adapter->flags & IXGBE_FLAG_IN_WATCHDOG_TASK)
1830 msleep(1);
1831
1832 /* disable transmits in the hardware now that interrupts are off */
1833 for (i = 0; i < adapter->num_tx_queues; i++) {
1834 j = adapter->tx_ring[i].reg_idx;
1835 txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(j));
1836 IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(j),
1837 (txdctl & ~IXGBE_TXDCTL_ENABLE));
1838 }
1839
1840 netif_carrier_off(netdev);
1841
1842 if (!pci_channel_offline(adapter->pdev))
1843 ixgbevf_reset(adapter);
1844
1845 ixgbevf_clean_all_tx_rings(adapter);
1846 ixgbevf_clean_all_rx_rings(adapter);
1847}
1848
1849void ixgbevf_reinit_locked(struct ixgbevf_adapter *adapter)
1850{
1851 struct ixgbe_hw *hw = &adapter->hw;
1852
1853 WARN_ON(in_interrupt());
1854
1855 while (test_and_set_bit(__IXGBEVF_RESETTING, &adapter->state))
1856 msleep(1);
1857
1858 /*
1859 * Check if PF is up before re-init. If not then skip until
1860 * later when the PF is up and ready to service requests from
1861 * the VF via mailbox. If the VF is up and running then the
1862 * watchdog task will continue to schedule reset tasks until
1863 * the PF is up and running.
1864 */
1865 if (!hw->mac.ops.reset_hw(hw)) {
1866 ixgbevf_down(adapter);
1867 ixgbevf_up(adapter);
1868 }
1869
1870 clear_bit(__IXGBEVF_RESETTING, &adapter->state);
1871}
1872
1873void ixgbevf_reset(struct ixgbevf_adapter *adapter)
1874{
1875 struct ixgbe_hw *hw = &adapter->hw;
1876 struct net_device *netdev = adapter->netdev;
1877
1878 if (hw->mac.ops.reset_hw(hw))
1879 hw_dbg(hw, "PF still resetting\n");
1880 else
1881 hw->mac.ops.init_hw(hw);
1882
1883 if (is_valid_ether_addr(adapter->hw.mac.addr)) {
1884 memcpy(netdev->dev_addr, adapter->hw.mac.addr,
1885 netdev->addr_len);
1886 memcpy(netdev->perm_addr, adapter->hw.mac.addr,
1887 netdev->addr_len);
1888 }
1889}
1890
1891static void ixgbevf_acquire_msix_vectors(struct ixgbevf_adapter *adapter,
1892 int vectors)
1893{
1894 int err, vector_threshold;
1895
1896 /* We'll want at least 3 (vector_threshold):
1897 * 1) TxQ[0] Cleanup
1898 * 2) RxQ[0] Cleanup
1899 * 3) Other (Link Status Change, etc.)
1900 */
1901 vector_threshold = MIN_MSIX_COUNT;
1902
1903 /* The more we get, the more we will assign to Tx/Rx Cleanup
1904 * for the separate queues...where Rx Cleanup >= Tx Cleanup.
1905 * Right now, we simply care about how many we'll get; we'll
1906	 * set them up later while requesting IRQs.
1907 */
1908 while (vectors >= vector_threshold) {
1909 err = pci_enable_msix(adapter->pdev, adapter->msix_entries,
1910 vectors);
1911 if (!err) /* Success in acquiring all requested vectors. */
1912 break;
1913 else if (err < 0)
1914 vectors = 0; /* Nasty failure, quit now */
1915 else /* err == number of vectors we should try again with */
1916 vectors = err;
1917 }
1918
1919 if (vectors < vector_threshold) {
1920 /* Can't allocate enough MSI-X interrupts? Oh well.
1921 * This just means we'll go with either a single MSI
1922 * vector or fall back to legacy interrupts.
1923 */
1924 hw_dbg(&adapter->hw,
1925 "Unable to allocate MSI-X interrupts\n");
1926 kfree(adapter->msix_entries);
1927 adapter->msix_entries = NULL;
1928 } else {
1929 /*
1930 * Adjust for only the vectors we'll use, which is minimum
1931 * of max_msix_q_vectors + NON_Q_VECTORS, or the number of
1932 * vectors we were allocated.
1933 */
1934 adapter->num_msix_vectors = vectors;
1935 }
1936}
1937
1938/**
1939 * ixgbevf_set_num_queues - Allocate queues for device, feature dependent
1940 * @adapter: board private structure to initialize
1941 *
1942 * This is the top level queue allocation routine. The order here is very
1943 * important, starting with the "most" number of features turned on at once,
1944 * and ending with the smallest set of features. This way large combinations
1945 * can be allocated if they're turned on, and smaller combinations are the
1946 * fallthrough conditions.
1947 *
1948 **/
1949static void ixgbevf_set_num_queues(struct ixgbevf_adapter *adapter)
1950{
1951 /* Start with base case */
1952 adapter->num_rx_queues = 1;
1953 adapter->num_tx_queues = 1;
1954 adapter->num_rx_pools = adapter->num_rx_queues;
1955 adapter->num_rx_queues_per_pool = 1;
1956}
1957
1958/**
1959 * ixgbevf_alloc_queues - Allocate memory for all rings
1960 * @adapter: board private structure to initialize
1961 *
1962 * We allocate one ring per queue at run-time since we don't know the
1963 * number of queues at compile-time. The ring structures are allocated
1964 * here; descriptor memory is set up later when the interface is opened.
1965 **/
1966static int ixgbevf_alloc_queues(struct ixgbevf_adapter *adapter)
1967{
1968 int i;
1969
1970 adapter->tx_ring = kcalloc(adapter->num_tx_queues,
1971 sizeof(struct ixgbevf_ring), GFP_KERNEL);
1972 if (!adapter->tx_ring)
1973 goto err_tx_ring_allocation;
1974
1975 adapter->rx_ring = kcalloc(adapter->num_rx_queues,
1976 sizeof(struct ixgbevf_ring), GFP_KERNEL);
1977 if (!adapter->rx_ring)
1978 goto err_rx_ring_allocation;
1979
1980 for (i = 0; i < adapter->num_tx_queues; i++) {
1981 adapter->tx_ring[i].count = adapter->tx_ring_count;
1982 adapter->tx_ring[i].queue_index = i;
1983 adapter->tx_ring[i].reg_idx = i;
1984 }
1985
1986 for (i = 0; i < adapter->num_rx_queues; i++) {
1987 adapter->rx_ring[i].count = adapter->rx_ring_count;
1988 adapter->rx_ring[i].queue_index = i;
1989 adapter->rx_ring[i].reg_idx = i;
1990 }
1991
1992 return 0;
1993
1994err_rx_ring_allocation:
1995 kfree(adapter->tx_ring);
1996err_tx_ring_allocation:
1997 return -ENOMEM;
1998}
1999
2000/**
2001 * ixgbevf_set_interrupt_capability - set MSI-X or FAIL if not supported
2002 * @adapter: board private structure to initialize
2003 *
2004 * Attempt to configure the interrupts using the best available
2005 * capabilities of the hardware and the kernel.
2006 **/
2007static int ixgbevf_set_interrupt_capability(struct ixgbevf_adapter *adapter)
2008{
2009 int err = 0;
2010 int vector, v_budget;
2011
2012 /*
2013 * It's easy to be greedy for MSI-X vectors, but it really
2014 * doesn't do us much good if we have a lot more vectors
2015	 * than CPUs. So let's be conservative and only ask for
2016	 * (roughly) twice the number of vectors as there are CPUs.
2017 */
2018 v_budget = min(adapter->num_rx_queues + adapter->num_tx_queues,
2019 (int)(num_online_cpus() * 2)) + NON_Q_VECTORS;
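	/* with the single Tx and single Rx queue set in
	 * ixgbevf_set_num_queues() this is 2 + NON_Q_VECTORS entries */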
2020
2021 /* A failure in MSI-X entry allocation isn't fatal, but it does
2022 * mean we disable MSI-X capabilities of the adapter. */
2023 adapter->msix_entries = kcalloc(v_budget,
2024 sizeof(struct msix_entry), GFP_KERNEL);
2025 if (!adapter->msix_entries) {
2026 err = -ENOMEM;
2027 goto out;
2028 }
2029
2030 for (vector = 0; vector < v_budget; vector++)
2031 adapter->msix_entries[vector].entry = vector;
2032
2033 ixgbevf_acquire_msix_vectors(adapter, v_budget);
2034
2035out:
2036 return err;
2037}
2038
2039/**
2040 * ixgbevf_alloc_q_vectors - Allocate memory for interrupt vectors
2041 * @adapter: board private structure to initialize
2042 *
2043 * We allocate one q_vector per queue interrupt. If allocation fails we
2044 * return -ENOMEM.
2045 **/
2046static int ixgbevf_alloc_q_vectors(struct ixgbevf_adapter *adapter)
2047{
2048 int q_idx, num_q_vectors;
2049 struct ixgbevf_q_vector *q_vector;
2050 int napi_vectors;
2051 int (*poll)(struct napi_struct *, int);
2052
2053 num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
2054 napi_vectors = adapter->num_rx_queues;
2055 poll = &ixgbevf_clean_rxonly;
2056
2057 for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
2058 q_vector = kzalloc(sizeof(struct ixgbevf_q_vector), GFP_KERNEL);
2059 if (!q_vector)
2060 goto err_out;
2061 q_vector->adapter = adapter;
2062 q_vector->v_idx = q_idx;
2063 q_vector->eitr = adapter->eitr_param;
2064 if (q_idx < napi_vectors)
2065 netif_napi_add(adapter->netdev, &q_vector->napi,
2066 (*poll), 64);
2067 adapter->q_vector[q_idx] = q_vector;
2068 }
2069
2070 return 0;
2071
2072err_out:
2073 while (q_idx) {
2074 q_idx--;
2075 q_vector = adapter->q_vector[q_idx];
2076 netif_napi_del(&q_vector->napi);
2077 kfree(q_vector);
2078 adapter->q_vector[q_idx] = NULL;
2079 }
2080 return -ENOMEM;
2081}
2082
2083/**
2084 * ixgbevf_free_q_vectors - Free memory allocated for interrupt vectors
2085 * @adapter: board private structure to initialize
2086 *
2087 * This function frees the memory allocated to the q_vectors. In addition if
2088 * NAPI is enabled it will delete any references to the NAPI struct prior
2089 * to freeing the q_vector.
2090 **/
2091static void ixgbevf_free_q_vectors(struct ixgbevf_adapter *adapter)
2092{
2093 int q_idx, num_q_vectors;
2094 int napi_vectors;
2095
2096 num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
2097 napi_vectors = adapter->num_rx_queues;
2098
2099 for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
2100 struct ixgbevf_q_vector *q_vector = adapter->q_vector[q_idx];
2101
2102 adapter->q_vector[q_idx] = NULL;
2103 if (q_idx < napi_vectors)
2104 netif_napi_del(&q_vector->napi);
2105 kfree(q_vector);
2106 }
2107}
2108
2109/**
2110 * ixgbevf_reset_interrupt_capability - Reset MSI-X setup
2111 * @adapter: board private structure
2112 *
2113 **/
2114static void ixgbevf_reset_interrupt_capability(struct ixgbevf_adapter *adapter)
2115{
2116 pci_disable_msix(adapter->pdev);
2117 kfree(adapter->msix_entries);
2118 adapter->msix_entries = NULL;
2119
2120 return;
2121}
2122
2123/**
2124 * ixgbevf_init_interrupt_scheme - Determine if MSI-X is supported and init
2125 * @adapter: board private structure to initialize
2126 *
2127 **/
2128static int ixgbevf_init_interrupt_scheme(struct ixgbevf_adapter *adapter)
2129{
2130 int err;
2131
2132 /* Number of supported queues */
2133 ixgbevf_set_num_queues(adapter);
2134
2135 err = ixgbevf_set_interrupt_capability(adapter);
2136 if (err) {
2137 hw_dbg(&adapter->hw,
2138 "Unable to setup interrupt capabilities\n");
2139 goto err_set_interrupt;
2140 }
2141
2142 err = ixgbevf_alloc_q_vectors(adapter);
2143 if (err) {
2144 hw_dbg(&adapter->hw, "Unable to allocate memory for queue "
2145 "vectors\n");
2146 goto err_alloc_q_vectors;
2147 }
2148
2149 err = ixgbevf_alloc_queues(adapter);
2150 if (err) {
2151 printk(KERN_ERR "Unable to allocate memory for queues\n");
2152 goto err_alloc_queues;
2153 }
2154
2155 hw_dbg(&adapter->hw, "Multiqueue %s: Rx Queue count = %u, "
2156 "Tx Queue count = %u\n",
2157 (adapter->num_rx_queues > 1) ? "Enabled" :
2158 "Disabled", adapter->num_rx_queues, adapter->num_tx_queues);
2159
2160 set_bit(__IXGBEVF_DOWN, &adapter->state);
2161
2162 return 0;
2163err_alloc_queues:
2164 ixgbevf_free_q_vectors(adapter);
2165err_alloc_q_vectors:
2166 ixgbevf_reset_interrupt_capability(adapter);
2167err_set_interrupt:
2168 return err;
2169}
2170
2171/**
2172 * ixgbevf_sw_init - Initialize general software structures
2173 * (struct ixgbevf_adapter)
2174 * @adapter: board private structure to initialize
2175 *
2176 * ixgbevf_sw_init initializes the Adapter private data structure.
2177 * Fields are initialized based on PCI device information and
2178 * OS network device settings (MTU size).
2179 **/
2180static int __devinit ixgbevf_sw_init(struct ixgbevf_adapter *adapter)
2181{
2182 struct ixgbe_hw *hw = &adapter->hw;
2183 struct pci_dev *pdev = adapter->pdev;
2184 int err;
2185
2186 /* PCI config space info */
2187
2188 hw->vendor_id = pdev->vendor;
2189 hw->device_id = pdev->device;
2190 pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id);
2191 hw->subsystem_vendor_id = pdev->subsystem_vendor;
2192 hw->subsystem_device_id = pdev->subsystem_device;
2193
2194 hw->mbx.ops.init_params(hw);
2195 hw->mac.max_tx_queues = MAX_TX_QUEUES;
2196 hw->mac.max_rx_queues = MAX_RX_QUEUES;
2197 err = hw->mac.ops.reset_hw(hw);
2198 if (err) {
2199 dev_info(&pdev->dev,
2200 "PF still in reset state, assigning new address\n");
2201 random_ether_addr(hw->mac.addr);
2202 } else {
2203 err = hw->mac.ops.init_hw(hw);
2204 if (err) {
2205			printk(KERN_ERR "init_hw failed: %d\n", err);
2206 goto out;
2207 }
2208 }
2209
2210 /* Enable dynamic interrupt throttling rates */
2211 adapter->eitr_param = 20000;
2212 adapter->itr_setting = 1;
2213
2214 /* set defaults for eitr in MegaBytes */
2215 adapter->eitr_low = 10;
2216 adapter->eitr_high = 20;
2217
2218 /* set default ring sizes */
2219 adapter->tx_ring_count = IXGBEVF_DEFAULT_TXD;
2220 adapter->rx_ring_count = IXGBEVF_DEFAULT_RXD;
2221
2222 /* enable rx csum by default */
2223 adapter->flags |= IXGBE_FLAG_RX_CSUM_ENABLED;
2224
2225 set_bit(__IXGBEVF_DOWN, &adapter->state);
2226
2227out:
2228 return err;
2229}
2230
2231static void ixgbevf_init_last_counter_stats(struct ixgbevf_adapter *adapter)
2232{
2233 struct ixgbe_hw *hw = &adapter->hw;
2234
2235 adapter->stats.last_vfgprc = IXGBE_READ_REG(hw, IXGBE_VFGPRC);
2236 adapter->stats.last_vfgorc = IXGBE_READ_REG(hw, IXGBE_VFGORC_LSB);
2237 adapter->stats.last_vfgorc |=
2238 (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGORC_MSB))) << 32);
2239 adapter->stats.last_vfgptc = IXGBE_READ_REG(hw, IXGBE_VFGPTC);
2240 adapter->stats.last_vfgotc = IXGBE_READ_REG(hw, IXGBE_VFGOTC_LSB);
2241 adapter->stats.last_vfgotc |=
2242 (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGOTC_MSB))) << 32);
2243 adapter->stats.last_vfmprc = IXGBE_READ_REG(hw, IXGBE_VFMPRC);
2244
2245 adapter->stats.base_vfgprc = adapter->stats.last_vfgprc;
2246 adapter->stats.base_vfgorc = adapter->stats.last_vfgorc;
2247 adapter->stats.base_vfgptc = adapter->stats.last_vfgptc;
2248 adapter->stats.base_vfgotc = adapter->stats.last_vfgotc;
2249 adapter->stats.base_vfmprc = adapter->stats.last_vfmprc;
2250}
2251
2252#define UPDATE_VF_COUNTER_32bit(reg, last_counter, counter) \
2253 { \
2254 u32 current_counter = IXGBE_READ_REG(hw, reg); \
2255 if (current_counter < last_counter) \
2256 counter += 0x100000000LL; \
2257 last_counter = current_counter; \
2258 counter &= 0xFFFFFFFF00000000LL; \
2259 counter |= current_counter; \
2260 }
2261
2262#define UPDATE_VF_COUNTER_36bit(reg_lsb, reg_msb, last_counter, counter) \
2263 { \
2264 u64 current_counter_lsb = IXGBE_READ_REG(hw, reg_lsb); \
2265 u64 current_counter_msb = IXGBE_READ_REG(hw, reg_msb); \
2266 u64 current_counter = (current_counter_msb << 32) | \
2267 current_counter_lsb; \
2268 if (current_counter < last_counter) \
2269 counter += 0x1000000000LL; \
2270 last_counter = current_counter; \
2271 counter &= 0xFFFFFFF000000000LL; \
2272 counter |= current_counter; \
2273 }
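/*
 * The 36-bit counters wrap at 0x1000000000: when a fresh reading is smaller
 * than the previous snapshot, the software copy is bumped by one full wrap
 * before its low 36 bits are replaced with the new value.
 */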
2274/**
2275 * ixgbevf_update_stats - Update the board statistics counters.
2276 * @adapter: board private structure
2277 **/
2278void ixgbevf_update_stats(struct ixgbevf_adapter *adapter)
2279{
2280 struct ixgbe_hw *hw = &adapter->hw;
2281
2282 UPDATE_VF_COUNTER_32bit(IXGBE_VFGPRC, adapter->stats.last_vfgprc,
2283 adapter->stats.vfgprc);
2284 UPDATE_VF_COUNTER_32bit(IXGBE_VFGPTC, adapter->stats.last_vfgptc,
2285 adapter->stats.vfgptc);
2286 UPDATE_VF_COUNTER_36bit(IXGBE_VFGORC_LSB, IXGBE_VFGORC_MSB,
2287 adapter->stats.last_vfgorc,
2288 adapter->stats.vfgorc);
2289 UPDATE_VF_COUNTER_36bit(IXGBE_VFGOTC_LSB, IXGBE_VFGOTC_MSB,
2290 adapter->stats.last_vfgotc,
2291 adapter->stats.vfgotc);
2292 UPDATE_VF_COUNTER_32bit(IXGBE_VFMPRC, adapter->stats.last_vfmprc,
2293 adapter->stats.vfmprc);
2294
2295 /* Fill out the OS statistics structure */
2296 adapter->net_stats.multicast = adapter->stats.vfmprc -
2297 adapter->stats.base_vfmprc;
2298}
2299
2300/**
2301 * ixgbevf_watchdog - Timer Call-back
2302 * @data: pointer to adapter cast into an unsigned long
2303 **/
2304static void ixgbevf_watchdog(unsigned long data)
2305{
2306 struct ixgbevf_adapter *adapter = (struct ixgbevf_adapter *)data;
2307 struct ixgbe_hw *hw = &adapter->hw;
2308 u64 eics = 0;
2309 int i;
2310
2311 /*
2312 * Do the watchdog outside of interrupt context due to the lovely
2313 * delays that some of the newer hardware requires
2314 */
2315
2316 if (test_bit(__IXGBEVF_DOWN, &adapter->state))
2317 goto watchdog_short_circuit;
2318
2319 /* get one bit for every active tx/rx interrupt vector */
2320 for (i = 0; i < adapter->num_msix_vectors - NON_Q_VECTORS; i++) {
2321 struct ixgbevf_q_vector *qv = adapter->q_vector[i];
2322 if (qv->rxr_count || qv->txr_count)
2323 eics |= (1 << i);
2324 }
2325
2326 IXGBE_WRITE_REG(hw, IXGBE_VTEICS, (u32)eics);
2327
2328watchdog_short_circuit:
2329 schedule_work(&adapter->watchdog_task);
2330}
2331
2332/**
2333 * ixgbevf_tx_timeout - Respond to a Tx Hang
2334 * @netdev: network interface device structure
2335 **/
2336static void ixgbevf_tx_timeout(struct net_device *netdev)
2337{
2338 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
2339
2340 /* Do the reset outside of interrupt context */
2341 schedule_work(&adapter->reset_task);
2342}
2343
2344static void ixgbevf_reset_task(struct work_struct *work)
2345{
2346 struct ixgbevf_adapter *adapter;
2347 adapter = container_of(work, struct ixgbevf_adapter, reset_task);
2348
2349 /* If we're already down or resetting, just bail */
2350 if (test_bit(__IXGBEVF_DOWN, &adapter->state) ||
2351 test_bit(__IXGBEVF_RESETTING, &adapter->state))
2352 return;
2353
2354 adapter->tx_timeout_count++;
2355
2356 ixgbevf_reinit_locked(adapter);
2357}
2358
2359/**
2360 * ixgbevf_watchdog_task - worker thread to bring link up
2361 * @work: pointer to work_struct containing our data
2362 **/
2363static void ixgbevf_watchdog_task(struct work_struct *work)
2364{
2365 struct ixgbevf_adapter *adapter = container_of(work,
2366 struct ixgbevf_adapter,
2367 watchdog_task);
2368 struct net_device *netdev = adapter->netdev;
2369 struct ixgbe_hw *hw = &adapter->hw;
2370 u32 link_speed = adapter->link_speed;
2371 bool link_up = adapter->link_up;
2372
2373 adapter->flags |= IXGBE_FLAG_IN_WATCHDOG_TASK;
2374
2375 /*
2376 * Always check the link on the watchdog because we have
2377 * no LSC interrupt
2378 */
2379 if (hw->mac.ops.check_link) {
2380 if ((hw->mac.ops.check_link(hw, &link_speed,
2381 &link_up, false)) != 0) {
2382 adapter->link_up = link_up;
2383 adapter->link_speed = link_speed;
2384 netif_carrier_off(netdev);
2385 netif_tx_stop_all_queues(netdev);
2386 schedule_work(&adapter->reset_task);
2387 goto pf_has_reset;
2388 }
2389 } else {
2390		/* if there is no check_link function,
2391		 * always assume link is up */
2392 link_speed = IXGBE_LINK_SPEED_10GB_FULL;
2393 link_up = true;
2394 }
2395 adapter->link_up = link_up;
2396 adapter->link_speed = link_speed;
2397
2398 if (link_up) {
2399 if (!netif_carrier_ok(netdev)) {
2400 hw_dbg(&adapter->hw, "NIC Link is Up %s, ",
2401 ((link_speed == IXGBE_LINK_SPEED_10GB_FULL) ?
2402 "10 Gbps" : "1 Gbps"));
2403 netif_carrier_on(netdev);
2404 netif_tx_wake_all_queues(netdev);
2405 } else {
2406 /* Force detection of hung controller */
2407 adapter->detect_tx_hung = true;
2408 }
2409 } else {
2410 adapter->link_up = false;
2411 adapter->link_speed = 0;
2412 if (netif_carrier_ok(netdev)) {
2413 hw_dbg(&adapter->hw, "NIC Link is Down\n");
2414 netif_carrier_off(netdev);
2415 netif_tx_stop_all_queues(netdev);
2416 }
2417 }
2418
2419pf_has_reset:
2420 ixgbevf_update_stats(adapter);
2421
2422 /* Force detection of hung controller every watchdog period */
2423 adapter->detect_tx_hung = true;
2424
2425 /* Reset the timer */
2426 if (!test_bit(__IXGBEVF_DOWN, &adapter->state))
2427 mod_timer(&adapter->watchdog_timer,
2428 round_jiffies(jiffies + (2 * HZ)));
2429
2430 adapter->flags &= ~IXGBE_FLAG_IN_WATCHDOG_TASK;
2431}
2432
2433/**
2434 * ixgbevf_free_tx_resources - Free Tx Resources per Queue
2435 * @adapter: board private structure
2436 * @tx_ring: Tx descriptor ring for a specific queue
2437 *
2438 * Free all transmit software resources
2439 **/
2440void ixgbevf_free_tx_resources(struct ixgbevf_adapter *adapter,
2441 struct ixgbevf_ring *tx_ring)
2442{
2443 struct pci_dev *pdev = adapter->pdev;
2444
2445 ixgbevf_clean_tx_ring(adapter, tx_ring);
2446
2447 vfree(tx_ring->tx_buffer_info);
2448 tx_ring->tx_buffer_info = NULL;
2449
2450 pci_free_consistent(pdev, tx_ring->size, tx_ring->desc, tx_ring->dma);
2451
2452 tx_ring->desc = NULL;
2453}
2454
2455/**
2456 * ixgbevf_free_all_tx_resources - Free Tx Resources for All Queues
2457 * @adapter: board private structure
2458 *
2459 * Free all transmit software resources
2460 **/
2461static void ixgbevf_free_all_tx_resources(struct ixgbevf_adapter *adapter)
2462{
2463 int i;
2464
2465 for (i = 0; i < adapter->num_tx_queues; i++)
2466 if (adapter->tx_ring[i].desc)
2467 ixgbevf_free_tx_resources(adapter,
2468 &adapter->tx_ring[i]);
2469
2470}
2471
2472/**
2473 * ixgbevf_setup_tx_resources - allocate Tx resources (Descriptors)
2474 * @adapter: board private structure
2475 * @tx_ring: tx descriptor ring (for a specific queue) to setup
2476 *
2477 * Return 0 on success, negative on failure
2478 **/
2479int ixgbevf_setup_tx_resources(struct ixgbevf_adapter *adapter,
2480 struct ixgbevf_ring *tx_ring)
2481{
2482 struct pci_dev *pdev = adapter->pdev;
2483 int size;
2484
2485 size = sizeof(struct ixgbevf_tx_buffer) * tx_ring->count;
2486 tx_ring->tx_buffer_info = vmalloc(size);
2487 if (!tx_ring->tx_buffer_info)
2488 goto err;
2489 memset(tx_ring->tx_buffer_info, 0, size);
2490
2491 /* round up to nearest 4K */
2492 tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc);
2493 tx_ring->size = ALIGN(tx_ring->size, 4096);
2494
2495 tx_ring->desc = pci_alloc_consistent(pdev, tx_ring->size,
2496 &tx_ring->dma);
2497 if (!tx_ring->desc)
2498 goto err;
2499
2500 tx_ring->next_to_use = 0;
2501 tx_ring->next_to_clean = 0;
2502 tx_ring->work_limit = tx_ring->count;
2503 return 0;
2504
2505err:
2506 vfree(tx_ring->tx_buffer_info);
2507 tx_ring->tx_buffer_info = NULL;
2508 hw_dbg(&adapter->hw, "Unable to allocate memory for the transmit "
2509 "descriptor ring\n");
2510 return -ENOMEM;
2511}
2512
2513/**
2514 * ixgbevf_setup_all_tx_resources - allocate all queues Tx resources
2515 * @adapter: board private structure
2516 *
2517 * If this function returns with an error, then it's possible one or
2518 * more of the rings is populated (while the rest are not). It is the
2519 * caller's duty to clean those orphaned rings.
2520 *
2521 * Return 0 on success, negative on failure
2522 **/
2523static int ixgbevf_setup_all_tx_resources(struct ixgbevf_adapter *adapter)
2524{
2525 int i, err = 0;
2526
2527 for (i = 0; i < adapter->num_tx_queues; i++) {
2528 err = ixgbevf_setup_tx_resources(adapter, &adapter->tx_ring[i]);
2529 if (!err)
2530 continue;
2531 hw_dbg(&adapter->hw,
2532 "Allocation for Tx Queue %u failed\n", i);
2533 break;
2534 }
2535
2536 return err;
2537}
2538
2539/**
2540 * ixgbevf_setup_rx_resources - allocate Rx resources (Descriptors)
2541 * @adapter: board private structure
2542 * @rx_ring: rx descriptor ring (for a specific queue) to setup
2543 *
2544 * Returns 0 on success, negative on failure
2545 **/
2546int ixgbevf_setup_rx_resources(struct ixgbevf_adapter *adapter,
2547 struct ixgbevf_ring *rx_ring)
2548{
2549 struct pci_dev *pdev = adapter->pdev;
2550 int size;
2551
2552 size = sizeof(struct ixgbevf_rx_buffer) * rx_ring->count;
2553 rx_ring->rx_buffer_info = vmalloc(size);
2554 if (!rx_ring->rx_buffer_info) {
2555 hw_dbg(&adapter->hw,
2556 "Unable to vmalloc buffer memory for "
2557 "the receive descriptor ring\n");
2558 goto alloc_failed;
2559 }
2560 memset(rx_ring->rx_buffer_info, 0, size);
2561
2562 /* Round up to nearest 4K */
2563 rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc);
2564 rx_ring->size = ALIGN(rx_ring->size, 4096);
2565
2566 rx_ring->desc = pci_alloc_consistent(pdev, rx_ring->size,
2567 &rx_ring->dma);
2568
2569 if (!rx_ring->desc) {
2570 hw_dbg(&adapter->hw,
2571 "Unable to allocate memory for "
2572 "the receive descriptor ring\n");
2573 vfree(rx_ring->rx_buffer_info);
2574 rx_ring->rx_buffer_info = NULL;
2575 goto alloc_failed;
2576 }
2577
2578 rx_ring->next_to_clean = 0;
2579 rx_ring->next_to_use = 0;
2580
2581 return 0;
2582alloc_failed:
2583 return -ENOMEM;
2584}
2585
2586/**
2587 * ixgbevf_setup_all_rx_resources - allocate all queues Rx resources
2588 * @adapter: board private structure
2589 *
2590 * If this function returns with an error, then it's possible one or
2591 * more of the rings is populated (while the rest are not). It is the
2592 * caller's duty to clean those orphaned rings.
2593 *
2594 * Return 0 on success, negative on failure
2595 **/
2596static int ixgbevf_setup_all_rx_resources(struct ixgbevf_adapter *adapter)
2597{
2598 int i, err = 0;
2599
2600 for (i = 0; i < adapter->num_rx_queues; i++) {
2601 err = ixgbevf_setup_rx_resources(adapter, &adapter->rx_ring[i]);
2602 if (!err)
2603 continue;
2604 hw_dbg(&adapter->hw,
2605 "Allocation for Rx Queue %u failed\n", i);
2606 break;
2607 }
2608 return err;
2609}
2610
2611/**
2612 * ixgbevf_free_rx_resources - Free Rx Resources
2613 * @adapter: board private structure
2614 * @rx_ring: ring to clean the resources from
2615 *
2616 * Free all receive software resources
2617 **/
2618void ixgbevf_free_rx_resources(struct ixgbevf_adapter *adapter,
2619 struct ixgbevf_ring *rx_ring)
2620{
2621 struct pci_dev *pdev = adapter->pdev;
2622
2623 ixgbevf_clean_rx_ring(adapter, rx_ring);
2624
2625 vfree(rx_ring->rx_buffer_info);
2626 rx_ring->rx_buffer_info = NULL;
2627
2628 pci_free_consistent(pdev, rx_ring->size, rx_ring->desc, rx_ring->dma);
2629
2630 rx_ring->desc = NULL;
2631}
2632
2633/**
2634 * ixgbevf_free_all_rx_resources - Free Rx Resources for All Queues
2635 * @adapter: board private structure
2636 *
2637 * Free all receive software resources
2638 **/
2639static void ixgbevf_free_all_rx_resources(struct ixgbevf_adapter *adapter)
2640{
2641 int i;
2642
2643 for (i = 0; i < adapter->num_rx_queues; i++)
2644 if (adapter->rx_ring[i].desc)
2645 ixgbevf_free_rx_resources(adapter,
2646 &adapter->rx_ring[i]);
2647}
2648
2649/**
2650 * ixgbevf_open - Called when a network interface is made active
2651 * @netdev: network interface device structure
2652 *
2653 * Returns 0 on success, negative value on failure
2654 *
2655 * The open entry point is called when a network interface is made
2656 * active by the system (IFF_UP). At this point all resources needed
2657 * for transmit and receive operations are allocated, the interrupt
2658 * handler is registered with the OS, the watchdog timer is started,
2659 * and the stack is notified that the interface is ready.
2660 **/
2661static int ixgbevf_open(struct net_device *netdev)
2662{
2663 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
2664 struct ixgbe_hw *hw = &adapter->hw;
2665 int err;
2666
2667 /* disallow open during test */
2668 if (test_bit(__IXGBEVF_TESTING, &adapter->state))
2669 return -EBUSY;
2670
2671 if (hw->adapter_stopped) {
2672 ixgbevf_reset(adapter);
2673 /* if adapter is still stopped then PF isn't up and
2674 * the vf can't start. */
2675 if (hw->adapter_stopped) {
2676 err = IXGBE_ERR_MBX;
2677 printk(KERN_ERR "Unable to start - perhaps the PF "
2678 "driver isn't up yet\n");
2679 goto err_setup_reset;
2680 }
2681 }
2682
2683 /* allocate transmit descriptors */
2684 err = ixgbevf_setup_all_tx_resources(adapter);
2685 if (err)
2686 goto err_setup_tx;
2687
2688 /* allocate receive descriptors */
2689 err = ixgbevf_setup_all_rx_resources(adapter);
2690 if (err)
2691 goto err_setup_rx;
2692
2693 ixgbevf_configure(adapter);
2694
2695 /*
2696 * Map the Tx/Rx rings to the vectors we were allotted.
2697 * Since request_irq is called later in this function, map_rings
2698 * must be called *before* up_complete.
2699 */
2700 ixgbevf_map_rings_to_vectors(adapter);
2701
2702 err = ixgbevf_up_complete(adapter);
2703 if (err)
2704 goto err_up;
2705
2706 /* clear any pending interrupts, may auto mask */
2707 IXGBE_READ_REG(hw, IXGBE_VTEICR);
2708 err = ixgbevf_request_irq(adapter);
2709 if (err)
2710 goto err_req_irq;
2711
2712 ixgbevf_irq_enable(adapter, true, true);
2713
2714 return 0;
2715
2716err_req_irq:
2717 ixgbevf_down(adapter);
2718err_up:
2719 ixgbevf_free_irq(adapter);
2720err_setup_rx:
2721 ixgbevf_free_all_rx_resources(adapter);
2722err_setup_tx:
2723 ixgbevf_free_all_tx_resources(adapter);
2724 ixgbevf_reset(adapter);
2725
2726err_setup_reset:
2727
2728 return err;
2729}
2730
2731/**
2732 * ixgbevf_close - Disables a network interface
2733 * @netdev: network interface device structure
2734 *
2735 * Returns 0, this is not allowed to fail
2736 *
2737 * The close entry point is called when an interface is de-activated
2738 * by the OS. The hardware is still under the drivers control, but
2739 * needs to be disabled. A global MAC reset is issued to stop the
2740 * hardware, and all transmit and receive resources are freed.
2741 **/
2742static int ixgbevf_close(struct net_device *netdev)
2743{
2744 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
2745
2746 ixgbevf_down(adapter);
2747 ixgbevf_free_irq(adapter);
2748
2749 ixgbevf_free_all_tx_resources(adapter);
2750 ixgbevf_free_all_rx_resources(adapter);
2751
2752 return 0;
2753}
2754
2755static int ixgbevf_tso(struct ixgbevf_adapter *adapter,
2756 struct ixgbevf_ring *tx_ring,
2757 struct sk_buff *skb, u32 tx_flags, u8 *hdr_len)
2758{
2759 struct ixgbe_adv_tx_context_desc *context_desc;
2760 unsigned int i;
2761 int err;
2762 struct ixgbevf_tx_buffer *tx_buffer_info;
2763 u32 vlan_macip_lens = 0, type_tucmd_mlhl;
2764 u32 mss_l4len_idx, l4len;
2765
2766 if (skb_is_gso(skb)) {
2767 if (skb_header_cloned(skb)) {
2768 err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
2769 if (err)
2770 return err;
2771 }
2772 l4len = tcp_hdrlen(skb);
2773 *hdr_len += l4len;
2774
2775 if (skb->protocol == htons(ETH_P_IP)) {
2776 struct iphdr *iph = ip_hdr(skb);
2777 iph->tot_len = 0;
2778 iph->check = 0;
2779 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
2780 iph->daddr, 0,
2781 IPPROTO_TCP,
2782 0);
2783 adapter->hw_tso_ctxt++;
2784 } else if (skb_is_gso_v6(skb)) {
2785 ipv6_hdr(skb)->payload_len = 0;
2786 tcp_hdr(skb)->check =
2787 ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
2788 &ipv6_hdr(skb)->daddr,
2789 0, IPPROTO_TCP, 0);
2790 adapter->hw_tso6_ctxt++;
2791 }
2792
2793 i = tx_ring->next_to_use;
2794
2795 tx_buffer_info = &tx_ring->tx_buffer_info[i];
2796 context_desc = IXGBE_TX_CTXTDESC_ADV(*tx_ring, i);
2797
2798 /* VLAN MACLEN IPLEN */
2799 if (tx_flags & IXGBE_TX_FLAGS_VLAN)
2800 vlan_macip_lens |=
2801 (tx_flags & IXGBE_TX_FLAGS_VLAN_MASK);
2802 vlan_macip_lens |= ((skb_network_offset(skb)) <<
2803 IXGBE_ADVTXD_MACLEN_SHIFT);
2804 *hdr_len += skb_network_offset(skb);
2805 vlan_macip_lens |=
2806 (skb_transport_header(skb) - skb_network_header(skb));
2807 *hdr_len +=
2808 (skb_transport_header(skb) - skb_network_header(skb));
2809 context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens);
2810 context_desc->seqnum_seed = 0;
2811
2812 /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
2813 type_tucmd_mlhl = (IXGBE_TXD_CMD_DEXT |
2814 IXGBE_ADVTXD_DTYP_CTXT);
2815
2816 if (skb->protocol == htons(ETH_P_IP))
2817 type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
2818 type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
2819 context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd_mlhl);
2820
2821 /* MSS L4LEN IDX */
2822 mss_l4len_idx =
2823 (skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT);
2824 mss_l4len_idx |= (l4len << IXGBE_ADVTXD_L4LEN_SHIFT);
2825 /* use index 1 for TSO */
2826 mss_l4len_idx |= (1 << IXGBE_ADVTXD_IDX_SHIFT);
2827 context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx);
2828
2829 tx_buffer_info->time_stamp = jiffies;
2830 tx_buffer_info->next_to_watch = i;
2831
2832 i++;
2833 if (i == tx_ring->count)
2834 i = 0;
2835 tx_ring->next_to_use = i;
2836
2837 return true;
2838 }
2839
2840 return false;
2841}
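/*
 * Worked example for the header accounting above: for an untagged
 * IPv4/TCP frame with no IP or TCP options, *hdr_len accumulates
 * tcp_hdrlen() (20 bytes) + skb_network_offset() (14 bytes of Ethernet
 * header) + the IP header (transport minus network offset, 20 bytes),
 * i.e. 54 bytes, so ixgbevf_tx_queue() later reports skb->len - 54 as
 * the TSO payload for such a frame.
 */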
2842
2843static bool ixgbevf_tx_csum(struct ixgbevf_adapter *adapter,
2844 struct ixgbevf_ring *tx_ring,
2845 struct sk_buff *skb, u32 tx_flags)
2846{
2847 struct ixgbe_adv_tx_context_desc *context_desc;
2848 unsigned int i;
2849 struct ixgbevf_tx_buffer *tx_buffer_info;
2850 u32 vlan_macip_lens = 0, type_tucmd_mlhl = 0;
2851
2852 if (skb->ip_summed == CHECKSUM_PARTIAL ||
2853 (tx_flags & IXGBE_TX_FLAGS_VLAN)) {
2854 i = tx_ring->next_to_use;
2855 tx_buffer_info = &tx_ring->tx_buffer_info[i];
2856 context_desc = IXGBE_TX_CTXTDESC_ADV(*tx_ring, i);
2857
2858 if (tx_flags & IXGBE_TX_FLAGS_VLAN)
2859 vlan_macip_lens |= (tx_flags &
2860 IXGBE_TX_FLAGS_VLAN_MASK);
2861 vlan_macip_lens |= (skb_network_offset(skb) <<
2862 IXGBE_ADVTXD_MACLEN_SHIFT);
2863 if (skb->ip_summed == CHECKSUM_PARTIAL)
2864 vlan_macip_lens |= (skb_transport_header(skb) -
2865 skb_network_header(skb));
2866
2867 context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens);
2868 context_desc->seqnum_seed = 0;
2869
2870 type_tucmd_mlhl |= (IXGBE_TXD_CMD_DEXT |
2871 IXGBE_ADVTXD_DTYP_CTXT);
2872
2873 if (skb->ip_summed == CHECKSUM_PARTIAL) {
2874 switch (skb->protocol) {
2875 case __constant_htons(ETH_P_IP):
2876 type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
2877 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
2878 type_tucmd_mlhl |=
2879 IXGBE_ADVTXD_TUCMD_L4T_TCP;
2880 break;
2881 case __constant_htons(ETH_P_IPV6):
2882 /* XXX what about other V6 headers?? */
2883 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
2884 type_tucmd_mlhl |=
2885 IXGBE_ADVTXD_TUCMD_L4T_TCP;
2886 break;
2887 default:
2888 if (unlikely(net_ratelimit())) {
2889 printk(KERN_WARNING
2890 "partial checksum but "
2891 "proto=%x!\n",
2892 skb->protocol);
2893 }
2894 break;
2895 }
2896 }
2897
2898 context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd_mlhl);
2899 /* use index zero for tx checksum offload */
2900 context_desc->mss_l4len_idx = 0;
2901
2902 tx_buffer_info->time_stamp = jiffies;
2903 tx_buffer_info->next_to_watch = i;
2904
2905 adapter->hw_csum_tx_good++;
2906 i++;
2907 if (i == tx_ring->count)
2908 i = 0;
2909 tx_ring->next_to_use = i;
2910
2911 return true;
2912 }
2913
2914 return false;
2915}
2916
2917static int ixgbevf_tx_map(struct ixgbevf_adapter *adapter,
2918 struct ixgbevf_ring *tx_ring,
2919 struct sk_buff *skb, u32 tx_flags,
2920 unsigned int first)
2921{
2922 struct pci_dev *pdev = adapter->pdev;
2923 struct ixgbevf_tx_buffer *tx_buffer_info;
2924 unsigned int len;
2925 unsigned int total = skb->len;
2926 unsigned int offset = 0, size, count = 0, i;
2927 unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
2928 unsigned int f;
2929
2930 i = tx_ring->next_to_use;
2931
2932 len = min(skb_headlen(skb), total);
2933 while (len) {
2934 tx_buffer_info = &tx_ring->tx_buffer_info[i];
2935 size = min(len, (unsigned int)IXGBE_MAX_DATA_PER_TXD);
2936
2937 tx_buffer_info->length = size;
2938 tx_buffer_info->mapped_as_page = false;
2939 tx_buffer_info->dma = pci_map_single(adapter->pdev,
2940 skb->data + offset,
2941 size, PCI_DMA_TODEVICE);
2942 if (pci_dma_mapping_error(pdev, tx_buffer_info->dma))
2943 goto dma_error;
2944 tx_buffer_info->time_stamp = jiffies;
2945 tx_buffer_info->next_to_watch = i;
2946
2947 len -= size;
2948 total -= size;
2949 offset += size;
2950 count++;
2951 i++;
2952 if (i == tx_ring->count)
2953 i = 0;
2954 }
2955
2956 for (f = 0; f < nr_frags; f++) {
2957 struct skb_frag_struct *frag;
2958
2959 frag = &skb_shinfo(skb)->frags[f];
2960 len = min((unsigned int)frag->size, total);
2961 offset = frag->page_offset;
2962
2963 while (len) {
2964 tx_buffer_info = &tx_ring->tx_buffer_info[i];
2965 size = min(len, (unsigned int)IXGBE_MAX_DATA_PER_TXD);
2966
2967 tx_buffer_info->length = size;
2968 tx_buffer_info->dma = pci_map_page(adapter->pdev,
2969 frag->page,
2970 offset,
2971 size,
2972 PCI_DMA_TODEVICE);
2973 tx_buffer_info->mapped_as_page = true;
2974 if (pci_dma_mapping_error(pdev, tx_buffer_info->dma))
2975 goto dma_error;
2976 tx_buffer_info->time_stamp = jiffies;
2977 tx_buffer_info->next_to_watch = i;
2978
2979 len -= size;
2980 total -= size;
2981 offset += size;
2982 count++;
2983 i++;
2984 if (i == tx_ring->count)
2985 i = 0;
2986 }
2987 if (total == 0)
2988 break;
2989 }
2990
2991 if (i == 0)
2992 i = tx_ring->count - 1;
2993 else
2994 i = i - 1;
2995 tx_ring->tx_buffer_info[i].skb = skb;
2996 tx_ring->tx_buffer_info[first].next_to_watch = i;
2997
2998 return count;
2999
3000dma_error:
3001 dev_err(&pdev->dev, "TX DMA map failed\n");
3002
3003 /* clear timestamp and dma mappings for failed tx_buffer_info map */
3004 tx_buffer_info->dma = 0;
3005 tx_buffer_info->time_stamp = 0;
3006 tx_buffer_info->next_to_watch = 0;
3007
3008 /* clear timestamp and dma mappings for remaining portion of packet */
3009 while (count > 0) {
3010 count--;
3011 if (i == 0)
3012 i = tx_ring->count;
3013 i--;
3014 tx_buffer_info = &tx_ring->tx_buffer_info[i];
3015 ixgbevf_unmap_and_free_tx_resource(adapter, tx_buffer_info);
3016 }
3018
3019 return count;
3020}
3021
3022static void ixgbevf_tx_queue(struct ixgbevf_adapter *adapter,
3023 struct ixgbevf_ring *tx_ring, int tx_flags,
3024 int count, u32 paylen, u8 hdr_len)
3025{
3026 union ixgbe_adv_tx_desc *tx_desc = NULL;
3027 struct ixgbevf_tx_buffer *tx_buffer_info;
3028 u32 olinfo_status = 0, cmd_type_len = 0;
3029 unsigned int i;
3030
3031 u32 txd_cmd = IXGBE_TXD_CMD_EOP | IXGBE_TXD_CMD_RS | IXGBE_TXD_CMD_IFCS;
3032
3033 cmd_type_len |= IXGBE_ADVTXD_DTYP_DATA;
3034
3035 cmd_type_len |= IXGBE_ADVTXD_DCMD_IFCS | IXGBE_ADVTXD_DCMD_DEXT;
3036
3037 if (tx_flags & IXGBE_TX_FLAGS_VLAN)
3038 cmd_type_len |= IXGBE_ADVTXD_DCMD_VLE;
3039
3040 if (tx_flags & IXGBE_TX_FLAGS_TSO) {
3041 cmd_type_len |= IXGBE_ADVTXD_DCMD_TSE;
3042
3043 olinfo_status |= IXGBE_TXD_POPTS_TXSM <<
3044 IXGBE_ADVTXD_POPTS_SHIFT;
3045
3046 /* use index 1 context for tso */
3047 olinfo_status |= (1 << IXGBE_ADVTXD_IDX_SHIFT);
3048 if (tx_flags & IXGBE_TX_FLAGS_IPV4)
3049 olinfo_status |= IXGBE_TXD_POPTS_IXSM <<
3050 IXGBE_ADVTXD_POPTS_SHIFT;
3051
3052 } else if (tx_flags & IXGBE_TX_FLAGS_CSUM)
3053 olinfo_status |= IXGBE_TXD_POPTS_TXSM <<
3054 IXGBE_ADVTXD_POPTS_SHIFT;
3055
3056 olinfo_status |= ((paylen - hdr_len) << IXGBE_ADVTXD_PAYLEN_SHIFT);
3057
3058 i = tx_ring->next_to_use;
3059 while (count--) {
3060 tx_buffer_info = &tx_ring->tx_buffer_info[i];
3061 tx_desc = IXGBE_TX_DESC_ADV(*tx_ring, i);
3062 tx_desc->read.buffer_addr = cpu_to_le64(tx_buffer_info->dma);
3063 tx_desc->read.cmd_type_len =
3064 cpu_to_le32(cmd_type_len | tx_buffer_info->length);
3065 tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
3066 i++;
3067 if (i == tx_ring->count)
3068 i = 0;
3069 }
3070
3071 tx_desc->read.cmd_type_len |= cpu_to_le32(txd_cmd);
3072
3073 /*
3074 * Force memory writes to complete before letting h/w
3075 * know there are new descriptors to fetch. (Only
3076 * applicable for weak-ordered memory model archs,
3077 * such as IA-64).
3078 */
3079 wmb();
3080
3081 tx_ring->next_to_use = i;
3082 writel(i, adapter->hw.hw_addr + tx_ring->tail);
3083}
3084
3085static int __ixgbevf_maybe_stop_tx(struct net_device *netdev,
3086 struct ixgbevf_ring *tx_ring, int size)
3087{
3088 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
3089
3090 netif_stop_subqueue(netdev, tx_ring->queue_index);
3091 /* Herbert's original patch had:
3092 * smp_mb__after_netif_stop_queue();
3093 * but since that doesn't exist yet, just open code it. */
3094 smp_mb();
3095
3096 /* We need to check again in case another CPU has just
3097 * made room available. */
3098 if (likely(IXGBE_DESC_UNUSED(tx_ring) < size))
3099 return -EBUSY;
3100
3101 /* A reprieve! - use start_queue because it doesn't call schedule */
3102 netif_start_subqueue(netdev, tx_ring->queue_index);
3103 ++adapter->restart_queue;
3104 return 0;
3105}
3106
3107static int ixgbevf_maybe_stop_tx(struct net_device *netdev,
3108 struct ixgbevf_ring *tx_ring, int size)
3109{
3110 if (likely(IXGBE_DESC_UNUSED(tx_ring) >= size))
3111 return 0;
3112 return __ixgbevf_maybe_stop_tx(netdev, tx_ring, size);
3113}
3114
3115static int ixgbevf_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
3116{
3117 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
3118 struct ixgbevf_ring *tx_ring;
3119 unsigned int first;
3120 unsigned int tx_flags = 0;
3121 u8 hdr_len = 0;
3122 int r_idx = 0, tso;
3123 int count = 0;
3124
3125 unsigned int f;
3126
3127 tx_ring = &adapter->tx_ring[r_idx];
3128
3129 if (adapter->vlgrp && vlan_tx_tag_present(skb)) {
3130 tx_flags |= vlan_tx_tag_get(skb);
3131 tx_flags <<= IXGBE_TX_FLAGS_VLAN_SHIFT;
3132 tx_flags |= IXGBE_TX_FLAGS_VLAN;
3133 }
3134
3135 /* three things can cause us to need a context descriptor */
3136 if (skb_is_gso(skb) ||
3137 (skb->ip_summed == CHECKSUM_PARTIAL) ||
3138 (tx_flags & IXGBE_TX_FLAGS_VLAN))
3139 count++;
3140
3141 count += TXD_USE_COUNT(skb_headlen(skb));
3142 for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
3143 count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);
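/*
 * Rough sketch of the arithmetic (assuming TXD_USE_COUNT() rounds a
 * byte count up to whole IXGBE_MAX_DATA_PER_TXD chunks, as the PF
 * ixgbe driver defines it): a GSO skb with a short linear header and
 * two page-sized fragments needs 1 context descriptor + 1 data
 * descriptor for the header + 2 for the fragments = 4, and that is
 * the value tested against the free ring space below.
 */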
3144
3145 if (ixgbevf_maybe_stop_tx(netdev, tx_ring, count)) {
3146 adapter->tx_busy++;
3147 return NETDEV_TX_BUSY;
3148 }
3149
3150 first = tx_ring->next_to_use;
3151
3152 if (skb->protocol == htons(ETH_P_IP))
3153 tx_flags |= IXGBE_TX_FLAGS_IPV4;
3154 tso = ixgbevf_tso(adapter, tx_ring, skb, tx_flags, &hdr_len);
3155 if (tso < 0) {
3156 dev_kfree_skb_any(skb);
3157 return NETDEV_TX_OK;
3158 }
3159
3160 if (tso)
3161 tx_flags |= IXGBE_TX_FLAGS_TSO;
3162 else if (ixgbevf_tx_csum(adapter, tx_ring, skb, tx_flags) &&
3163 (skb->ip_summed == CHECKSUM_PARTIAL))
3164 tx_flags |= IXGBE_TX_FLAGS_CSUM;
3165
3166 count = ixgbevf_tx_map(adapter, tx_ring, skb, tx_flags, first);
3167 if (count) {
3168 ixgbevf_tx_queue(adapter, tx_ring, tx_flags, count,
3169 skb->len, hdr_len);
3170 netdev->trans_start = jiffies;
3171 ixgbevf_maybe_stop_tx(netdev, tx_ring, DESC_NEEDED);
3172 } else {
3173 /* DMA mapping failed; drop the frame and rewind the ring */
3174 dev_kfree_skb_any(skb);
3175 tx_ring->tx_buffer_info[first].time_stamp = 0;
3176 tx_ring->next_to_use = first;
3177 }
3173
3174 return NETDEV_TX_OK;
3175}
3176
3177/**
3178 * ixgbevf_get_stats - Get System Network Statistics
3179 * @netdev: network interface device structure
3180 *
3181 * Returns the address of the device statistics structure.
3182 * The statistics are actually updated from the timer callback.
3183 **/
3184static struct net_device_stats *ixgbevf_get_stats(struct net_device *netdev)
3185{
3186 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
3187
3188 /* only return the current stats */
3189 return &adapter->net_stats;
3190}
3191
3192/**
3193 * ixgbevf_set_mac - Change the Ethernet Address of the NIC
3194 * @netdev: network interface device structure
3195 * @p: pointer to an address structure
3196 *
3197 * Returns 0 on success, negative on failure
3198 **/
3199static int ixgbevf_set_mac(struct net_device *netdev, void *p)
3200{
3201 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
3202 struct ixgbe_hw *hw = &adapter->hw;
3203 struct sockaddr *addr = p;
3204
3205 if (!is_valid_ether_addr(addr->sa_data))
3206 return -EADDRNOTAVAIL;
3207
3208 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
3209 memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);
3210
3211 if (hw->mac.ops.set_rar)
3212 hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0);
3213
3214 return 0;
3215}
3216
3217/**
3218 * ixgbevf_change_mtu - Change the Maximum Transfer Unit
3219 * @netdev: network interface device structure
3220 * @new_mtu: new value for maximum frame size
3221 *
3222 * Returns 0 on success, negative on failure
3223 **/
3224static int ixgbevf_change_mtu(struct net_device *netdev, int new_mtu)
3225{
3226 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
3227 int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
3228
3229 /* MTU < 68 is an error and causes problems on some kernels */
3230 if ((new_mtu < 68) || (max_frame > MAXIMUM_ETHERNET_VLAN_SIZE))
3231 return -EINVAL;
3232
3233 hw_dbg(&adapter->hw, "changing MTU from %d to %d\n",
3234 netdev->mtu, new_mtu);
3235 /* must set new MTU before calling down or up */
3236 netdev->mtu = new_mtu;
3237
3238 if (netif_running(netdev))
3239 ixgbevf_reinit_locked(adapter);
3240
3241 return 0;
3242}
3243
3244static void ixgbevf_shutdown(struct pci_dev *pdev)
3245{
3246 struct net_device *netdev = pci_get_drvdata(pdev);
3247 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
3248
3249 netif_device_detach(netdev);
3250
3251 if (netif_running(netdev)) {
3252 ixgbevf_down(adapter);
3253 ixgbevf_free_irq(adapter);
3254 ixgbevf_free_all_tx_resources(adapter);
3255 ixgbevf_free_all_rx_resources(adapter);
3256 }
3257
3258#ifdef CONFIG_PM
3259 pci_save_state(pdev);
3260#endif
3261
3262 pci_disable_device(pdev);
3263}
3264
3265static const struct net_device_ops ixgbe_netdev_ops = {
3266 .ndo_open = &ixgbevf_open,
3267 .ndo_stop = &ixgbevf_close,
3268 .ndo_start_xmit = &ixgbevf_xmit_frame,
3269 .ndo_get_stats = &ixgbevf_get_stats,
3270 .ndo_set_rx_mode = &ixgbevf_set_rx_mode,
3271 .ndo_set_multicast_list = &ixgbevf_set_rx_mode,
3272 .ndo_validate_addr = eth_validate_addr,
3273 .ndo_set_mac_address = &ixgbevf_set_mac,
3274 .ndo_change_mtu = &ixgbevf_change_mtu,
3275 .ndo_tx_timeout = &ixgbevf_tx_timeout,
3276 .ndo_vlan_rx_register = &ixgbevf_vlan_rx_register,
3277 .ndo_vlan_rx_add_vid = &ixgbevf_vlan_rx_add_vid,
3278 .ndo_vlan_rx_kill_vid = &ixgbevf_vlan_rx_kill_vid,
3279};
3280
3281static void ixgbevf_assign_netdev_ops(struct net_device *dev)
3282{
3283 struct ixgbevf_adapter *adapter;
3284 adapter = netdev_priv(dev);
3285 dev->netdev_ops = &ixgbe_netdev_ops;
3286 ixgbevf_set_ethtool_ops(dev);
3287 dev->watchdog_timeo = 5 * HZ;
3288}
3289
3290/**
3291 * ixgbevf_probe - Device Initialization Routine
3292 * @pdev: PCI device information struct
3293 * @ent: entry in ixgbevf_pci_tbl
3294 *
3295 * Returns 0 on success, negative on failure
3296 *
3297 * ixgbevf_probe initializes an adapter identified by a pci_dev structure.
3298 * The OS initialization, configuring of the adapter private structure,
3299 * and a hardware reset occur.
3300 **/
3301static int __devinit ixgbevf_probe(struct pci_dev *pdev,
3302 const struct pci_device_id *ent)
3303{
3304 struct net_device *netdev;
3305 struct ixgbevf_adapter *adapter = NULL;
3306 struct ixgbe_hw *hw = NULL;
3307 const struct ixgbevf_info *ii = ixgbevf_info_tbl[ent->driver_data];
3308 static int cards_found;
3309 int err, pci_using_dac;
3310
3311 err = pci_enable_device(pdev);
3312 if (err)
3313 return err;
3314
3315 if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) &&
3316 !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) {
3317 pci_using_dac = 1;
3318 } else {
3319 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
3320 if (err) {
3321 err = pci_set_consistent_dma_mask(pdev,
3322 DMA_BIT_MASK(32));
3323 if (err) {
3324 dev_err(&pdev->dev, "No usable DMA "
3325 "configuration, aborting\n");
3326 goto err_dma;
3327 }
3328 }
3329 pci_using_dac = 0;
3330 }
3331
3332 err = pci_request_regions(pdev, ixgbevf_driver_name);
3333 if (err) {
3334 dev_err(&pdev->dev, "pci_request_regions failed 0x%x\n", err);
3335 goto err_pci_reg;
3336 }
3337
3338 pci_set_master(pdev);
3339
3340#ifdef HAVE_TX_MQ
3341 netdev = alloc_etherdev_mq(sizeof(struct ixgbevf_adapter),
3342 MAX_TX_QUEUES);
3343#else
3344 netdev = alloc_etherdev(sizeof(struct ixgbevf_adapter));
3345#endif
3346 if (!netdev) {
3347 err = -ENOMEM;
3348 goto err_alloc_etherdev;
3349 }
3350
3351 SET_NETDEV_DEV(netdev, &pdev->dev);
3352
3353 pci_set_drvdata(pdev, netdev);
3354 adapter = netdev_priv(netdev);
3355
3356 adapter->netdev = netdev;
3357 adapter->pdev = pdev;
3358 hw = &adapter->hw;
3359 hw->back = adapter;
3360 adapter->msg_enable = (1 << DEFAULT_DEBUG_LEVEL_SHIFT) - 1;
3361
3362 /*
3363 * call save state here in standalone driver because it relies on
3364 * adapter struct to exist, and needs to call netdev_priv
3365 */
3366 pci_save_state(pdev);
3367
3368 hw->hw_addr = ioremap(pci_resource_start(pdev, 0),
3369 pci_resource_len(pdev, 0));
3370 if (!hw->hw_addr) {
3371 err = -EIO;
3372 goto err_ioremap;
3373 }
3374
3375 ixgbevf_assign_netdev_ops(netdev);
3376
3377 adapter->bd_number = cards_found;
3378
3379 /* Setup hw api */
3380 memcpy(&hw->mac.ops, ii->mac_ops, sizeof(hw->mac.ops));
3381 hw->mac.type = ii->mac;
3382
3383 memcpy(&hw->mbx.ops, &ixgbevf_mbx_ops,
3384 sizeof(struct ixgbe_mbx_operations));
3385
3386 adapter->flags &= ~IXGBE_FLAG_RX_PS_CAPABLE;
3387 adapter->flags &= ~IXGBE_FLAG_RX_PS_ENABLED;
3388 adapter->flags |= IXGBE_FLAG_RX_1BUF_CAPABLE;
3389
3390 /* setup the private structure */
3391 err = ixgbevf_sw_init(adapter);
3392
3393 ixgbevf_init_last_counter_stats(adapter);
3394
3395#ifdef MAX_SKB_FRAGS
3396 netdev->features = NETIF_F_SG |
3397 NETIF_F_IP_CSUM |
3398 NETIF_F_HW_VLAN_TX |
3399 NETIF_F_HW_VLAN_RX |
3400 NETIF_F_HW_VLAN_FILTER;
3401
3402 netdev->features |= NETIF_F_IPV6_CSUM;
3403 netdev->features |= NETIF_F_TSO;
3404 netdev->features |= NETIF_F_TSO6;
3405 netdev->vlan_features |= NETIF_F_TSO;
3406 netdev->vlan_features |= NETIF_F_TSO6;
3407 netdev->vlan_features |= NETIF_F_IP_CSUM;
3408 netdev->vlan_features |= NETIF_F_SG;
3409
3410 if (pci_using_dac)
3411 netdev->features |= NETIF_F_HIGHDMA;
3412
3413#endif /* MAX_SKB_FRAGS */
3414
3415 /* The HW MAC address was set and/or determined in sw_init */
3416 memcpy(netdev->dev_addr, adapter->hw.mac.addr, netdev->addr_len);
3417 memcpy(netdev->perm_addr, adapter->hw.mac.addr, netdev->addr_len);
3418
3419 if (!is_valid_ether_addr(netdev->dev_addr)) {
3420 printk(KERN_ERR "invalid MAC address\n");
3421 err = -EIO;
3422 goto err_sw_init;
3423 }
3424
3425 init_timer(&adapter->watchdog_timer);
3426 adapter->watchdog_timer.function = &ixgbevf_watchdog;
3427 adapter->watchdog_timer.data = (unsigned long)adapter;
3428
3429 INIT_WORK(&adapter->reset_task, ixgbevf_reset_task);
3430 INIT_WORK(&adapter->watchdog_task, ixgbevf_watchdog_task);
3431
3432 err = ixgbevf_init_interrupt_scheme(adapter);
3433 if (err)
3434 goto err_sw_init;
3435
3436 /* pick up the PCI bus settings for reporting later */
3437 if (hw->mac.ops.get_bus_info)
3438 hw->mac.ops.get_bus_info(hw);
3439
3440
3441 netif_carrier_off(netdev);
3442 netif_tx_stop_all_queues(netdev);
3443
3444 strcpy(netdev->name, "eth%d");
3445
3446 err = register_netdev(netdev);
3447 if (err)
3448 goto err_register;
3449
3450 adapter->netdev_registered = true;
3451
3452 /* print the MAC address */
3453 hw_dbg(hw, "%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x\n",
3454 netdev->dev_addr[0],
3455 netdev->dev_addr[1],
3456 netdev->dev_addr[2],
3457 netdev->dev_addr[3],
3458 netdev->dev_addr[4],
3459 netdev->dev_addr[5]);
3460
3461 hw_dbg(hw, "MAC: %d\n", hw->mac.type);
3462
3463 hw_dbg(hw, "LRO is disabled\n");
3464
3465 hw_dbg(hw, "Intel(R) 82599 Virtual Function\n");
3466 cards_found++;
3467 return 0;
3468
3469err_register:
3470err_sw_init:
3471 ixgbevf_reset_interrupt_capability(adapter);
3472 iounmap(hw->hw_addr);
3473err_ioremap:
3474 free_netdev(netdev);
3475err_alloc_etherdev:
3476 pci_release_regions(pdev);
3477err_pci_reg:
3478err_dma:
3479 pci_disable_device(pdev);
3480 return err;
3481}
3482
3483/**
3484 * ixgbevf_remove - Device Removal Routine
3485 * @pdev: PCI device information struct
3486 *
3487 * ixgbevf_remove is called by the PCI subsystem to alert the driver
3488 * that it should release a PCI device. This could be caused by a
3489 * Hot-Plug event, or because the driver is going to be removed from
3490 * memory.
3491 **/
3492static void __devexit ixgbevf_remove(struct pci_dev *pdev)
3493{
3494 struct net_device *netdev = pci_get_drvdata(pdev);
3495 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
3496
3497 set_bit(__IXGBEVF_DOWN, &adapter->state);
3498
3499 del_timer_sync(&adapter->watchdog_timer);
3500
3501 cancel_work_sync(&adapter->watchdog_task);
3502
3503 flush_scheduled_work();
3504
3505 if (adapter->netdev_registered) {
3506 unregister_netdev(netdev);
3507 adapter->netdev_registered = false;
3508 }
3509
3510 ixgbevf_reset_interrupt_capability(adapter);
3511
3512 iounmap(adapter->hw.hw_addr);
3513 pci_release_regions(pdev);
3514
3515 hw_dbg(&adapter->hw, "Remove complete\n");
3516
3517 kfree(adapter->tx_ring);
3518 kfree(adapter->rx_ring);
3519
3520 free_netdev(netdev);
3521
3522 pci_disable_device(pdev);
3523}
3524
3525static struct pci_driver ixgbevf_driver = {
3526 .name = ixgbevf_driver_name,
3527 .id_table = ixgbevf_pci_tbl,
3528 .probe = ixgbevf_probe,
3529 .remove = __devexit_p(ixgbevf_remove),
3530 .shutdown = ixgbevf_shutdown,
3531};
3532
3533/**
3534 * ixgbevf_init_module - Driver Registration Routine
3535 *
3536 * ixgbevf_init_module is the first routine called when the driver is
3537 * loaded. All it does is register with the PCI subsystem.
3538 **/
3539static int __init ixgbevf_init_module(void)
3540{
3541 int ret;
3542 printk(KERN_INFO "ixgbevf: %s - version %s\n", ixgbevf_driver_string,
3543 ixgbevf_driver_version);
3544
3545 printk(KERN_INFO "%s\n", ixgbevf_copyright);
3546
3547 ret = pci_register_driver(&ixgbevf_driver);
3548 return ret;
3549}
3550
3551module_init(ixgbevf_init_module);
3552
3553/**
3554 * ixgbevf_exit_module - Driver Exit Cleanup Routine
3555 *
3556 * ixgbevf_exit_module is called just before the driver is removed
3557 * from memory.
3558 **/
3559static void __exit ixgbevf_exit_module(void)
3560{
3561 pci_unregister_driver(&ixgbevf_driver);
3562}
3563
3564#ifdef DEBUG
3565/**
3566 * ixgbevf_get_hw_dev_name - return device name string
3567 * used by hardware layer to print debugging information
3568 **/
3569char *ixgbevf_get_hw_dev_name(struct ixgbe_hw *hw)
3570{
3571 struct ixgbevf_adapter *adapter = hw->back;
3572 return adapter->netdev->name;
3573}
3574
3575#endif
3576module_exit(ixgbevf_exit_module);
3577
3578/* ixgbevf_main.c */
diff --git a/drivers/net/ixgbevf/mbx.c b/drivers/net/ixgbevf/mbx.c
new file mode 100644
index 00000000000..b8143501e6f
--- /dev/null
+++ b/drivers/net/ixgbevf/mbx.c
@@ -0,0 +1,341 @@
1/*******************************************************************************
2
3 Intel 82599 Virtual Function driver
4 Copyright(c) 1999 - 2009 Intel Corporation.
5
6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License,
8 version 2, as published by the Free Software Foundation.
9
10 This program is distributed in the hope it will be useful, but WITHOUT
11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 more details.
14
15 You should have received a copy of the GNU General Public License along with
16 this program; if not, write to the Free Software Foundation, Inc.,
17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18
19 The full GNU General Public License is included in this distribution in
20 the file called "COPYING".
21
22 Contact Information:
23 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
24 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
25
26*******************************************************************************/
27
28#include "mbx.h"
29
30/**
31 * ixgbevf_poll_for_msg - Wait for message notification
32 * @hw: pointer to the HW structure
33 *
34 * returns 0 if it successfully received a message notification
35 **/
36static s32 ixgbevf_poll_for_msg(struct ixgbe_hw *hw)
37{
38 struct ixgbe_mbx_info *mbx = &hw->mbx;
39 int countdown = mbx->timeout;
40
41 while (countdown && mbx->ops.check_for_msg(hw)) {
42 countdown--;
43 udelay(mbx->udelay);
44 }
45
46 /* if we failed, all future posted messages fail until reset */
47 if (!countdown)
48 mbx->timeout = 0;
49
50 return countdown ? 0 : IXGBE_ERR_MBX;
51}
52
53/**
54 * ixgbevf_poll_for_ack - Wait for message acknowledgement
55 * @hw: pointer to the HW structure
56 *
57 * returns 0 if it successfully received a message acknowledgement
58 **/
59static s32 ixgbevf_poll_for_ack(struct ixgbe_hw *hw)
60{
61 struct ixgbe_mbx_info *mbx = &hw->mbx;
62 int countdown = mbx->timeout;
63
64 while (countdown && mbx->ops.check_for_ack(hw)) {
65 countdown--;
66 udelay(mbx->udelay);
67 }
68
69 /* if we failed, all future posted messages fail until reset */
70 if (!countdown)
71 mbx->timeout = 0;
72
73 return countdown ? 0 : IXGBE_ERR_MBX;
74}
75
76/**
77 * ixgbevf_read_posted_mbx - Wait for message notification and receive message
78 * @hw: pointer to the HW structure
79 * @msg: The message buffer
80 * @size: Length of buffer
81 *
82 * returns 0 if it successfully received a message notification and
83 * copied it into the receive buffer.
84 **/
85static s32 ixgbevf_read_posted_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size)
86{
87 struct ixgbe_mbx_info *mbx = &hw->mbx;
88 s32 ret_val = IXGBE_ERR_MBX;
89
90 ret_val = ixgbevf_poll_for_msg(hw);
91
92 /* if a message was received, read it; otherwise we timed out */
93 if (!ret_val)
94 ret_val = mbx->ops.read(hw, msg, size);
95
96 return ret_val;
97}
98
99/**
100 * ixgbevf_write_posted_mbx - Write a message to the mailbox, wait for ack
101 * @hw: pointer to the HW structure
102 * @msg: The message buffer
103 * @size: Length of buffer
104 *
105 * returns 0 if it successfully copied message into the buffer and
106 * received an ack to that message within delay * timeout period
107 **/
108static s32 ixgbevf_write_posted_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size)
109{
110 struct ixgbe_mbx_info *mbx = &hw->mbx;
111 s32 ret_val;
112
113 /* send msg */
114 ret_val = mbx->ops.write(hw, msg, size);
115
116 /* if msg sent wait until we receive an ack */
117 if (!ret_val)
118 ret_val = ixgbevf_poll_for_ack(hw);
119
120 return ret_val;
121}
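/*
 * Taken together, a posted VF->PF request runs as: ixgbevf_write_mbx_vf()
 * (further down in this file, installed as mbx->ops.write) grabs the VFU
 * lock, copies the message dwords into VFMBMEM and sets IXGBE_VFMAILBOX_REQ;
 * ixgbevf_poll_for_ack() then spins, up to mbx->timeout iterations spaced
 * mbx->udelay microseconds apart, until the PF raises PFACK. A posted read
 * mirrors this with ixgbevf_poll_for_msg() followed by ixgbevf_read_mbx_vf().
 */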
122
123/**
124 * ixgbevf_read_v2p_mailbox - read v2p mailbox
125 * @hw: pointer to the HW structure
126 *
127 * This function is used to read the v2p mailbox without losing the read to
128 * clear status bits.
129 **/
130static u32 ixgbevf_read_v2p_mailbox(struct ixgbe_hw *hw)
131{
132 u32 v2p_mailbox = IXGBE_READ_REG(hw, IXGBE_VFMAILBOX);
133
134 v2p_mailbox |= hw->mbx.v2p_mailbox;
135 hw->mbx.v2p_mailbox |= v2p_mailbox & IXGBE_VFMAILBOX_R2C_BITS;
136
137 return v2p_mailbox;
138}
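/*
 * Example of why the caching matters: if the PF has set both PFSTS and
 * PFACK and the first caller only tests for PFSTS, a plain read of this
 * read-to-clear register would lose the PFACK indication. Folding the
 * R2C bits into hw->mbx.v2p_mailbox keeps the ACK visible for a later
 * ixgbevf_check_for_ack_vf(); ixgbevf_check_for_bit_vf() then clears
 * only the bit it actually tested.
 */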
139
140/**
141 * ixgbevf_check_for_bit_vf - Determine if a status bit was set
142 * @hw: pointer to the HW structure
143 * @mask: bitmask for bits to be tested and cleared
144 *
145 * This function is used to check for the read to clear bits within
146 * the V2P mailbox.
147 **/
148static s32 ixgbevf_check_for_bit_vf(struct ixgbe_hw *hw, u32 mask)
149{
150 u32 v2p_mailbox = ixgbevf_read_v2p_mailbox(hw);
151 s32 ret_val = IXGBE_ERR_MBX;
152
153 if (v2p_mailbox & mask)
154 ret_val = 0;
155
156 hw->mbx.v2p_mailbox &= ~mask;
157
158 return ret_val;
159}
160
161/**
162 * ixgbevf_check_for_msg_vf - checks to see if the PF has sent mail
163 * @hw: pointer to the HW structure
164 *
165 * returns 0 if the PF has set the Status bit or else ERR_MBX
166 **/
167static s32 ixgbevf_check_for_msg_vf(struct ixgbe_hw *hw)
168{
169 s32 ret_val = IXGBE_ERR_MBX;
170
171 if (!ixgbevf_check_for_bit_vf(hw, IXGBE_VFMAILBOX_PFSTS)) {
172 ret_val = 0;
173 hw->mbx.stats.reqs++;
174 }
175
176 return ret_val;
177}
178
179/**
180 * ixgbevf_check_for_ack_vf - checks to see if the PF has ACK'd
181 * @hw: pointer to the HW structure
182 *
183 * returns 0 if the PF has set the ACK bit or else ERR_MBX
184 **/
185static s32 ixgbevf_check_for_ack_vf(struct ixgbe_hw *hw)
186{
187 s32 ret_val = IXGBE_ERR_MBX;
188
189 if (!ixgbevf_check_for_bit_vf(hw, IXGBE_VFMAILBOX_PFACK)) {
190 ret_val = 0;
191 hw->mbx.stats.acks++;
192 }
193
194 return ret_val;
195}
196
197/**
198 * ixgbevf_check_for_rst_vf - checks to see if the PF has reset
199 * @hw: pointer to the HW structure
200 *
201 * returns 0 if the PF has set either reset bit (RSTD or RSTI), else ERR_MBX
202 **/
203static s32 ixgbevf_check_for_rst_vf(struct ixgbe_hw *hw)
204{
205 s32 ret_val = IXGBE_ERR_MBX;
206
207 if (!ixgbevf_check_for_bit_vf(hw, (IXGBE_VFMAILBOX_RSTD |
208 IXGBE_VFMAILBOX_RSTI))) {
209 ret_val = 0;
210 hw->mbx.stats.rsts++;
211 }
212
213 return ret_val;
214}
215
216/**
217 * ixgbevf_obtain_mbx_lock_vf - obtain mailbox lock
218 * @hw: pointer to the HW structure
219 *
220 * return 0 if we obtained the mailbox lock
221 **/
222static s32 ixgbevf_obtain_mbx_lock_vf(struct ixgbe_hw *hw)
223{
224 s32 ret_val = IXGBE_ERR_MBX;
225
226 /* Take ownership of the buffer */
227 IXGBE_WRITE_REG(hw, IXGBE_VFMAILBOX, IXGBE_VFMAILBOX_VFU);
228
229 /* reserve mailbox for vf use */
230 if (ixgbevf_read_v2p_mailbox(hw) & IXGBE_VFMAILBOX_VFU)
231 ret_val = 0;
232
233 return ret_val;
234}
235
236/**
237 * ixgbevf_write_mbx_vf - Write a message to the mailbox
238 * @hw: pointer to the HW structure
239 * @msg: The message buffer
240 * @size: Length of buffer
241 *
242 * returns 0 if it successfully copied message into the buffer
243 **/
244static s32 ixgbevf_write_mbx_vf(struct ixgbe_hw *hw, u32 *msg, u16 size)
245{
246 s32 ret_val;
247 u16 i;
248
249
250 /* lock the mailbox to prevent pf/vf race condition */
251 ret_val = ixgbevf_obtain_mbx_lock_vf(hw);
252 if (ret_val)
253 goto out_no_write;
254
255 /* flush msg and acks as we are overwriting the message buffer */
256 ixgbevf_check_for_msg_vf(hw);
257 ixgbevf_check_for_ack_vf(hw);
258
259 /* copy the caller specified message to the mailbox memory buffer */
260 for (i = 0; i < size; i++)
261 IXGBE_WRITE_REG_ARRAY(hw, IXGBE_VFMBMEM, i, msg[i]);
262
263 /* update stats */
264 hw->mbx.stats.msgs_tx++;
265
266 /* Drop VFU and interrupt the PF to tell it a message has been sent */
267 IXGBE_WRITE_REG(hw, IXGBE_VFMAILBOX, IXGBE_VFMAILBOX_REQ);
268
269out_no_write:
270 return ret_val;
271}
272
273/**
274 * ixgbevf_read_mbx_vf - Reads a message from the inbox intended for vf
275 * @hw: pointer to the HW structure
276 * @msg: The message buffer
277 * @size: Length of buffer
278 *
279 * returns 0 if it successfully read the message from the buffer
280 **/
281static s32 ixgbevf_read_mbx_vf(struct ixgbe_hw *hw, u32 *msg, u16 size)
282{
283 s32 ret_val = 0;
284 u16 i;
285
286 /* lock the mailbox to prevent pf/vf race condition */
287 ret_val = ixgbevf_obtain_mbx_lock_vf(hw);
288 if (ret_val)
289 goto out_no_read;
290
291 /* copy the message from the mailbox memory buffer */
292 for (i = 0; i < size; i++)
293 msg[i] = IXGBE_READ_REG_ARRAY(hw, IXGBE_VFMBMEM, i);
294
295 /* Acknowledge receipt and release mailbox, then we're done */
296 IXGBE_WRITE_REG(hw, IXGBE_VFMAILBOX, IXGBE_VFMAILBOX_ACK);
297
298 /* update stats */
299 hw->mbx.stats.msgs_rx++;
300
301out_no_read:
302 return ret_val;
303}
304
305/**
306 * ixgbevf_init_mbx_params_vf - set initial values for vf mailbox
307 * @hw: pointer to the HW structure
308 *
309 * Initializes the hw->mbx struct to correct values for vf mailbox
310 */
311s32 ixgbevf_init_mbx_params_vf(struct ixgbe_hw *hw)
312{
313 struct ixgbe_mbx_info *mbx = &hw->mbx;
314
315 /* start mailbox as timed out and let the reset_hw call set the timeout
316 * value to begin communications */
317 mbx->timeout = 0;
318 mbx->udelay = IXGBE_VF_MBX_INIT_DELAY;
319
320 mbx->size = IXGBE_VFMAILBOX_SIZE;
321
322 mbx->stats.msgs_tx = 0;
323 mbx->stats.msgs_rx = 0;
324 mbx->stats.reqs = 0;
325 mbx->stats.acks = 0;
326 mbx->stats.rsts = 0;
327
328 return 0;
329}
330
331struct ixgbe_mbx_operations ixgbevf_mbx_ops = {
332 .init_params = ixgbevf_init_mbx_params_vf,
333 .read = ixgbevf_read_mbx_vf,
334 .write = ixgbevf_write_mbx_vf,
335 .read_posted = ixgbevf_read_posted_mbx,
336 .write_posted = ixgbevf_write_posted_mbx,
337 .check_for_msg = ixgbevf_check_for_msg_vf,
338 .check_for_ack = ixgbevf_check_for_ack_vf,
339 .check_for_rst = ixgbevf_check_for_rst_vf,
340};
341
diff --git a/drivers/net/ixgbevf/mbx.h b/drivers/net/ixgbevf/mbx.h
new file mode 100644
index 00000000000..1b0e0bf4c0f
--- /dev/null
+++ b/drivers/net/ixgbevf/mbx.h
@@ -0,0 +1,100 @@
1/*******************************************************************************
2
3 Intel 82599 Virtual Function driver
4 Copyright(c) 1999 - 2009 Intel Corporation.
5
6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License,
8 version 2, as published by the Free Software Foundation.
9
10 This program is distributed in the hope it will be useful, but WITHOUT
11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 more details.
14
15 You should have received a copy of the GNU General Public License along with
16 this program; if not, write to the Free Software Foundation, Inc.,
17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18
19 The full GNU General Public License is included in this distribution in
20 the file called "COPYING".
21
22 Contact Information:
23 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
24 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
25
26*******************************************************************************/
27
28#ifndef _IXGBE_MBX_H_
29#define _IXGBE_MBX_H_
30
31#include "vf.h"
32
33#define IXGBE_VFMAILBOX_SIZE 16 /* 16 32 bit words - 64 bytes */
34#define IXGBE_ERR_MBX -100
35
36#define IXGBE_VFMAILBOX 0x002FC
37#define IXGBE_VFMBMEM 0x00200
38
39/* Define mailbox register bits */
40#define IXGBE_VFMAILBOX_REQ 0x00000001 /* Request for PF Ready bit */
41#define IXGBE_VFMAILBOX_ACK 0x00000002 /* Ack PF message received */
42#define IXGBE_VFMAILBOX_VFU 0x00000004 /* VF owns the mailbox buffer */
43#define IXGBE_VFMAILBOX_PFU 0x00000008 /* PF owns the mailbox buffer */
44#define IXGBE_VFMAILBOX_PFSTS 0x00000010 /* PF wrote a message in the MB */
45#define IXGBE_VFMAILBOX_PFACK 0x00000020 /* PF ack the previous VF msg */
46#define IXGBE_VFMAILBOX_RSTI 0x00000040 /* PF has reset indication */
47#define IXGBE_VFMAILBOX_RSTD 0x00000080 /* PF has indicated reset done */
48#define IXGBE_VFMAILBOX_R2C_BITS 0x000000B0 /* All read to clear bits */
49
50#define IXGBE_PFMAILBOX(x) (0x04B00 + (4 * x))
51#define IXGBE_PFMBMEM(vfn) (0x13000 + (64 * vfn))
52
53#define IXGBE_PFMAILBOX_STS 0x00000001 /* Initiate message send to VF */
54#define IXGBE_PFMAILBOX_ACK 0x00000002 /* Ack message recv'd from VF */
55#define IXGBE_PFMAILBOX_VFU 0x00000004 /* VF owns the mailbox buffer */
56#define IXGBE_PFMAILBOX_PFU 0x00000008 /* PF owns the mailbox buffer */
57#define IXGBE_PFMAILBOX_RVFU 0x00000010 /* Reset VFU - used when VF stuck */
58
59#define IXGBE_MBVFICR_VFREQ_MASK 0x0000FFFF /* bits for VF messages */
60#define IXGBE_MBVFICR_VFREQ_VF1 0x00000001 /* bit for VF 1 message */
61#define IXGBE_MBVFICR_VFACK_MASK 0xFFFF0000 /* bits for VF acks */
62#define IXGBE_MBVFICR_VFACK_VF1 0x00010000 /* bit for VF 1 ack */
63
64
65/* If it's a IXGBE_VF_* msg then it originates in the VF and is sent to the
66 * PF. The reverse is true if it is IXGBE_PF_*.
67 * Message ACK's are the value or'd with 0xF0000000
68 */
69#define IXGBE_VT_MSGTYPE_ACK 0x80000000 /* Messages below or'd with
70 * this are the ACK */
71#define IXGBE_VT_MSGTYPE_NACK 0x40000000 /* Messages below or'd with
72 * this are the NACK */
73#define IXGBE_VT_MSGTYPE_CTS 0x20000000 /* Indicates that VF is still
74 * clear to send requests */
75#define IXGBE_VT_MSGINFO_SHIFT 16
76/* bits 23:16 are used for extra info for certain messages */
77#define IXGBE_VT_MSGINFO_MASK (0xFF << IXGBE_VT_MSGINFO_SHIFT)
78
79#define IXGBE_VF_RESET 0x01 /* VF requests reset */
80#define IXGBE_VF_SET_MAC_ADDR 0x02 /* VF requests PF to set MAC addr */
81#define IXGBE_VF_SET_MULTICAST 0x03 /* VF requests PF to set MC addr */
82#define IXGBE_VF_SET_VLAN 0x04 /* VF requests PF to set VLAN */
83#define IXGBE_VF_SET_LPE 0x05 /* VF requests PF to set VMOLR.LPE */
84
85/* length of permanent address message returned from PF */
86#define IXGBE_VF_PERMADDR_MSG_LEN 4
87/* word in permanent address message with the current multicast type */
88#define IXGBE_VF_MC_TYPE_WORD 3
89
90#define IXGBE_PF_CONTROL_MSG 0x0100 /* PF control message */
91
92#define IXGBE_VF_MBX_INIT_TIMEOUT 2000 /* number of retries on mailbox */
93#define IXGBE_VF_MBX_INIT_DELAY 500 /* microseconds between retries */
94
95/* forward declaration of the HW struct */
96struct ixgbe_hw;
97
98s32 ixgbevf_init_mbx_params_vf(struct ixgbe_hw *);
99
100#endif /* _IXGBE_MBX_H_ */
diff --git a/drivers/net/ixgbevf/regs.h b/drivers/net/ixgbevf/regs.h
new file mode 100644
index 00000000000..12f75960aec
--- /dev/null
+++ b/drivers/net/ixgbevf/regs.h
@@ -0,0 +1,85 @@
1/*******************************************************************************
2
3 Intel 82599 Virtual Function driver
4 Copyright(c) 1999 - 2009 Intel Corporation.
5
6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License,
8 version 2, as published by the Free Software Foundation.
9
10 This program is distributed in the hope it will be useful, but WITHOUT
11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 more details.
14
15 You should have received a copy of the GNU General Public License along with
16 this program; if not, write to the Free Software Foundation, Inc.,
17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18
19 The full GNU General Public License is included in this distribution in
20 the file called "COPYING".
21
22 Contact Information:
23 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
24 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
25
26*******************************************************************************/
27
28#ifndef _IXGBEVF_REGS_H_
29#define _IXGBEVF_REGS_H_
30
31#define IXGBE_VFCTRL 0x00000
32#define IXGBE_VFSTATUS 0x00008
33#define IXGBE_VFLINKS 0x00010
34#define IXGBE_VFRTIMER 0x00048
35#define IXGBE_VFRXMEMWRAP 0x03190
36#define IXGBE_VTEICR 0x00100
37#define IXGBE_VTEICS 0x00104
38#define IXGBE_VTEIMS 0x00108
39#define IXGBE_VTEIMC 0x0010C
40#define IXGBE_VTEIAC 0x00110
41#define IXGBE_VTEIAM 0x00114
42#define IXGBE_VTEITR(x) (0x00820 + (4 * x))
43#define IXGBE_VTIVAR(x) (0x00120 + (4 * x))
44#define IXGBE_VTIVAR_MISC 0x00140
45#define IXGBE_VTRSCINT(x) (0x00180 + (4 * x))
46#define IXGBE_VFRDBAL(x) (0x01000 + (0x40 * x))
47#define IXGBE_VFRDBAH(x) (0x01004 + (0x40 * x))
48#define IXGBE_VFRDLEN(x) (0x01008 + (0x40 * x))
49#define IXGBE_VFRDH(x) (0x01010 + (0x40 * x))
50#define IXGBE_VFRDT(x) (0x01018 + (0x40 * x))
51#define IXGBE_VFRXDCTL(x) (0x01028 + (0x40 * x))
52#define IXGBE_VFSRRCTL(x) (0x01014 + (0x40 * x))
53#define IXGBE_VFRSCCTL(x) (0x0102C + (0x40 * x))
54#define IXGBE_VFPSRTYPE 0x00300
55#define IXGBE_VFTDBAL(x) (0x02000 + (0x40 * x))
56#define IXGBE_VFTDBAH(x) (0x02004 + (0x40 * x))
57#define IXGBE_VFTDLEN(x) (0x02008 + (0x40 * x))
58#define IXGBE_VFTDH(x) (0x02010 + (0x40 * x))
59#define IXGBE_VFTDT(x) (0x02018 + (0x40 * x))
60#define IXGBE_VFTXDCTL(x) (0x02028 + (0x40 * x))
61#define IXGBE_VFTDWBAL(x) (0x02038 + (0x40 * x))
62#define IXGBE_VFTDWBAH(x) (0x0203C + (0x40 * x))
63#define IXGBE_VFDCA_RXCTRL(x) (0x0100C + (0x40 * x))
64#define IXGBE_VFDCA_TXCTRL(x) (0x0200c + (0x40 * x))
65#define IXGBE_VFGPRC 0x0101C
66#define IXGBE_VFGPTC 0x0201C
67#define IXGBE_VFGORC_LSB 0x01020
68#define IXGBE_VFGORC_MSB 0x01024
69#define IXGBE_VFGOTC_LSB 0x02020
70#define IXGBE_VFGOTC_MSB 0x02024
71#define IXGBE_VFMPRC 0x01034
72
73#define IXGBE_WRITE_REG(a, reg, value) writel((value), ((a)->hw_addr + (reg)))
74
75#define IXGBE_READ_REG(a, reg) readl((a)->hw_addr + (reg))
76
77#define IXGBE_WRITE_REG_ARRAY(a, reg, offset, value) ( \
78 writel((value), ((a)->hw_addr + (reg) + ((offset) << 2))))
79
80#define IXGBE_READ_REG_ARRAY(a, reg, offset) ( \
81 readl((a)->hw_addr + (reg) + ((offset) << 2)))
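/*
 * The array accessors address consecutive 32-bit words, so the word offset
 * is converted to a byte offset with "<< 2". For example, with IXGBE_VFMBMEM
 * defined as 0x200 in mbx.h, IXGBE_READ_REG_ARRAY(hw, IXGBE_VFMBMEM, 2)
 * reads the dword at hw_addr + 0x208.
 */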
82
83#define IXGBE_WRITE_FLUSH(a) (IXGBE_READ_REG(a, IXGBE_VFSTATUS))
84
85#endif /* _IXGBEVF_REGS_H_ */
diff --git a/drivers/net/ixgbevf/vf.c b/drivers/net/ixgbevf/vf.c
new file mode 100644
index 00000000000..4b5dec0ec14
--- /dev/null
+++ b/drivers/net/ixgbevf/vf.c
@@ -0,0 +1,387 @@
1/*******************************************************************************
2
3 Intel 82599 Virtual Function driver
4 Copyright(c) 1999 - 2009 Intel Corporation.
5
6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License,
8 version 2, as published by the Free Software Foundation.
9
10 This program is distributed in the hope it will be useful, but WITHOUT
11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 more details.
14
15 You should have received a copy of the GNU General Public License along with
16 this program; if not, write to the Free Software Foundation, Inc.,
17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18
19 The full GNU General Public License is included in this distribution in
20 the file called "COPYING".
21
22 Contact Information:
23 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
24 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
25
26*******************************************************************************/
27
28#include "vf.h"
29
30/**
31 * ixgbevf_start_hw_vf - Prepare hardware for Tx/Rx
32 * @hw: pointer to hardware structure
33 *
34 * Starts the hardware by filling the bus info structure and media type, clears
35 * all on chip counters, initializes receive address registers, multicast
36 * table, VLAN filter table, calls routine to set up link and flow control
37 * settings, and leaves transmit and receive units disabled and uninitialized
38 **/
39static s32 ixgbevf_start_hw_vf(struct ixgbe_hw *hw)
40{
41 /* Clear adapter stopped flag */
42 hw->adapter_stopped = false;
43
44 return 0;
45}
46
47/**
48 * ixgbevf_init_hw_vf - virtual function hardware initialization
49 * @hw: pointer to hardware structure
50 *
51 * Initialize the hardware by resetting the hardware and then starting
52 * the hardware
53 **/
54static s32 ixgbevf_init_hw_vf(struct ixgbe_hw *hw)
55{
56 s32 status = hw->mac.ops.start_hw(hw);
57
58 hw->mac.ops.get_mac_addr(hw, hw->mac.addr);
59
60 return status;
61}
62
63/**
64 * ixgbevf_reset_hw_vf - Performs hardware reset
65 * @hw: pointer to hardware structure
66 *
67 * Resets the hardware by resetting the transmit and receive units, masks and
68 * clears all interrupts.
69 **/
70static s32 ixgbevf_reset_hw_vf(struct ixgbe_hw *hw)
71{
72 struct ixgbe_mbx_info *mbx = &hw->mbx;
73 u32 timeout = IXGBE_VF_INIT_TIMEOUT;
74 s32 ret_val = IXGBE_ERR_INVALID_MAC_ADDR;
75 u32 msgbuf[IXGBE_VF_PERMADDR_MSG_LEN];
76 u8 *addr = (u8 *)(&msgbuf[1]);
77
78 /* Call adapter stop to disable tx/rx and clear interrupts */
79 hw->mac.ops.stop_adapter(hw);
80
81 IXGBE_WRITE_REG(hw, IXGBE_VFCTRL, IXGBE_CTRL_RST);
82 IXGBE_WRITE_FLUSH(hw);
83
84 /* we cannot reset while the RSTI / RSTD bits are asserted */
85 while (!mbx->ops.check_for_rst(hw) && timeout) {
86 timeout--;
87 udelay(5);
88 }
89
90 if (!timeout)
91 return IXGBE_ERR_RESET_FAILED;
92
93 /* mailbox timeout can now become active */
94 mbx->timeout = IXGBE_VF_MBX_INIT_TIMEOUT;
95
96 msgbuf[0] = IXGBE_VF_RESET;
97 mbx->ops.write_posted(hw, msgbuf, 1);
98
99 msleep(10);
100
101 /* set our "perm_addr" based on info provided by PF */
102 /* also set up the mc_filter_type which is piggy backed
103 * on the mac address in word 3 */
104 ret_val = mbx->ops.read_posted(hw, msgbuf, IXGBE_VF_PERMADDR_MSG_LEN);
105 if (ret_val)
106 return ret_val;
107
108 if (msgbuf[0] != (IXGBE_VF_RESET | IXGBE_VT_MSGTYPE_ACK))
109 return IXGBE_ERR_INVALID_MAC_ADDR;
110
111 memcpy(hw->mac.perm_addr, addr, IXGBE_ETH_LENGTH_OF_ADDRESS);
112 hw->mac.mc_filter_type = msgbuf[IXGBE_VF_MC_TYPE_WORD];
113
114 return 0;
115}
116
117/**
118 * ixgbevf_stop_hw_vf - Generic stop Tx/Rx units
119 * @hw: pointer to hardware structure
120 *
121 * Sets the adapter_stopped flag within ixgbe_hw struct. Clears interrupts,
122 * disables transmit and receive units. The adapter_stopped flag is used by
123 * the shared code and drivers to determine if the adapter is in a stopped
124 * state and should not touch the hardware.
125 **/
126static s32 ixgbevf_stop_hw_vf(struct ixgbe_hw *hw)
127{
128 u32 number_of_queues;
129 u32 reg_val;
130 u16 i;
131
132 /*
133 * Set the adapter_stopped flag so other driver functions stop touching
134 * the hardware
135 */
136 hw->adapter_stopped = true;
137
138 /* Disable the receive unit by stopping each queue */
139 number_of_queues = hw->mac.max_rx_queues;
140 for (i = 0; i < number_of_queues; i++) {
141 reg_val = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i));
142 if (reg_val & IXGBE_RXDCTL_ENABLE) {
143 reg_val &= ~IXGBE_RXDCTL_ENABLE;
144 IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), reg_val);
145 }
146 }
147
148 IXGBE_WRITE_FLUSH(hw);
149
150 /* Clear the interrupt mask to stop interrupts from being generated */
151 IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, IXGBE_VF_IRQ_CLEAR_MASK);
152
153 /* Clear any pending interrupts */
154 IXGBE_READ_REG(hw, IXGBE_VTEICR);
155
156 /* Disable the transmit unit. Each queue must be disabled. */
157 number_of_queues = hw->mac.max_tx_queues;
158 for (i = 0; i < number_of_queues; i++) {
159 reg_val = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i));
160 if (reg_val & IXGBE_TXDCTL_ENABLE) {
161 reg_val &= ~IXGBE_TXDCTL_ENABLE;
162 IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(i), reg_val);
163 }
164 }
165
166 return 0;
167}
168
169/**
170 * ixgbevf_mta_vector - Determines bit-vector in multicast table to set
171 * @hw: pointer to hardware structure
172 * @mc_addr: the multicast address
173 *
174 * Extracts 12 bits from a multicast address to determine which
175 * bit-vector to set in the multicast table. The hardware uses 12 bits of
176 * incoming rx multicast addresses to determine the bit-vector to check in
177 * the MTA. Which of the 4 combinations of 12 bits the hardware uses is set
178 * by the MO field of the MCSTCTRL. The MO field is set during initialization
179 * to mc_filter_type.
180 **/
181static s32 ixgbevf_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr)
182{
183 u32 vector = 0;
184
185 switch (hw->mac.mc_filter_type) {
186 case 0: /* use bits [47:36] of the address */
187 vector = ((mc_addr[4] >> 4) | (((u16)mc_addr[5]) << 4));
188 break;
189 case 1: /* use bits [46:35] of the address */
190 vector = ((mc_addr[4] >> 3) | (((u16)mc_addr[5]) << 5));
191 break;
192 case 2: /* use bits [45:34] of the address */
193 vector = ((mc_addr[4] >> 2) | (((u16)mc_addr[5]) << 6));
194 break;
195 case 3: /* use bits [43:32] of the address */
196 vector = ((mc_addr[4]) | (((u16)mc_addr[5]) << 8));
197 break;
198 default: /* Invalid mc_filter_type */
199 break;
200 }
201
202 /* vector can only be 12-bits or boundary will be exceeded */
203 vector &= 0xFFF;
204 return vector;
205}
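/*
 * Worked example for filter type 0 (bits [47:36]): for the multicast
 * address 01:00:5e:00:00:fb, mc_addr[4] = 0x00 and mc_addr[5] = 0xfb,
 * so vector = (0x00 >> 4) | (0xfb << 4) = 0xfb0, selecting entry 0xfb0
 * of the 4096 possible bit positions.
 */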
206
207/**
208 * ixgbevf_get_mac_addr_vf - Read device MAC address
209 * @hw: pointer to the HW structure
210 * @mac_addr: pointer to storage for retrieved MAC address
211 **/
212static s32 ixgbevf_get_mac_addr_vf(struct ixgbe_hw *hw, u8 *mac_addr)
213{
214 memcpy(mac_addr, hw->mac.perm_addr, IXGBE_ETH_LENGTH_OF_ADDRESS);
215
216 return 0;
217}
218
219/**
220 * ixgbevf_set_rar_vf - set device MAC address
221 * @hw: pointer to hardware structure
222 * @index: Receive address register to write
223 * @addr: Address to put into receive address register
224 * @vmdq: Unused in this implementation
225 **/
226static s32 ixgbevf_set_rar_vf(struct ixgbe_hw *hw, u32 index, u8 *addr,
227 u32 vmdq)
228{
229 struct ixgbe_mbx_info *mbx = &hw->mbx;
230 u32 msgbuf[3];
231 u8 *msg_addr = (u8 *)(&msgbuf[1]);
232 s32 ret_val;
233
234 memset(msgbuf, 0, sizeof(msgbuf));
235 msgbuf[0] = IXGBE_VF_SET_MAC_ADDR;
236 memcpy(msg_addr, addr, 6);
237 ret_val = mbx->ops.write_posted(hw, msgbuf, 3);
238
239 if (!ret_val)
240 ret_val = mbx->ops.read_posted(hw, msgbuf, 3);
241
242 msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS;
243
244 /* if nacked the address was rejected, use "perm_addr" */
245 if (!ret_val &&
246 (msgbuf[0] == (IXGBE_VF_SET_MAC_ADDR | IXGBE_VT_MSGTYPE_NACK)))
247 ixgbevf_get_mac_addr_vf(hw, hw->mac.addr);
248
249 return ret_val;
250}
251
252/**
253 * ixgbevf_update_mc_addr_list_vf - Update Multicast addresses
254 * @hw: pointer to the HW structure
255 * @mc_addr_list: array of multicast addresses to program
256 * @mc_addr_count: number of multicast addresses to program
257 * @next: caller supplied function to return next address in list
258 *
259 * Updates the Multicast Table Array.
260 **/
261static s32 ixgbevf_update_mc_addr_list_vf(struct ixgbe_hw *hw, u8 *mc_addr_list,
262 u32 mc_addr_count,
263 ixgbe_mc_addr_itr next)
264{
265 struct ixgbe_mbx_info *mbx = &hw->mbx;
266 u32 msgbuf[IXGBE_VFMAILBOX_SIZE];
267 u16 *vector_list = (u16 *)&msgbuf[1];
268 u32 vector;
269 u32 cnt, i;
270 u32 vmdq;
271
272 /* Each entry in the list uses 1 16 bit word. We have 30
273 * 16 bit words available in our HW msg buffer (minus 1 for the
274 * msg type). That's 30 hash values if we pack 'em right. If
275 * there are more than 30 MC addresses to add then punt the
276 * extras for now and then add code to handle more than 30 later.
277 * It would be unusual for a server to request that many multi-cast
278 * addresses except in large enterprise network environments.
279 */
280
281 cnt = (mc_addr_count > 30) ? 30 : mc_addr_count;
282 msgbuf[0] = IXGBE_VF_SET_MULTICAST;
283 msgbuf[0] |= cnt << IXGBE_VT_MSGINFO_SHIFT;
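/*
 * e.g. five multicast addresses give
 * msgbuf[0] = IXGBE_VF_SET_MULTICAST | (5 << 16) = 0x00050003,
 * with the five 16-bit hash values packed into msgbuf[1..3] below.
 */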
284
285 for (i = 0; i < cnt; i++) {
286 vector = ixgbevf_mta_vector(hw, next(hw, &mc_addr_list, &vmdq));
287 vector_list[i] = vector;
288 }
289
290 mbx->ops.write_posted(hw, msgbuf, IXGBE_VFMAILBOX_SIZE);
291
292 return 0;
293}
294
295/**
296 * ixgbevf_set_vfta_vf - Set/Unset vlan filter table address
297 * @hw: pointer to the HW structure
298 * @vlan: 12 bit VLAN ID
299 * @vind: unused by VF drivers
300 * @vlan_on: if true then set bit, else clear bit
301 **/
302static s32 ixgbevf_set_vfta_vf(struct ixgbe_hw *hw, u32 vlan, u32 vind,
303 bool vlan_on)
304{
305 struct ixgbe_mbx_info *mbx = &hw->mbx;
306 u32 msgbuf[2];
307
308 msgbuf[0] = IXGBE_VF_SET_VLAN;
309 msgbuf[1] = vlan;
310 /* Setting the 8 bit field MSG INFO to TRUE indicates "add" */
311 msgbuf[0] |= vlan_on << IXGBE_VT_MSGINFO_SHIFT;
312
313 return mbx->ops.write_posted(hw, msgbuf, 2);
314}
315
316/**
317 * ixgbevf_setup_mac_link_vf - Setup MAC link settings
318 * @hw: pointer to hardware structure
319 * @speed: Unused in this implementation
320 * @autoneg: Unused in this implementation
321 * @autoneg_wait_to_complete: Unused in this implementation
322 *
323 * Do nothing and return success. VF drivers are not allowed to change
324 * global settings. Maintained for driver compatibility.
325 **/
326static s32 ixgbevf_setup_mac_link_vf(struct ixgbe_hw *hw,
327 ixgbe_link_speed speed, bool autoneg,
328 bool autoneg_wait_to_complete)
329{
330 return 0;
331}
332
333/**
334 * ixgbevf_check_mac_link_vf - Get link/speed status
335 * @hw: pointer to hardware structure
336 * @speed: pointer to link speed
337 * @link_up: true if link is up, false otherwise
338 * @autoneg_wait_to_complete: true when waiting for completion is needed
339 *
340 * Reads the links register to determine if link is up and the current speed
341 **/
342static s32 ixgbevf_check_mac_link_vf(struct ixgbe_hw *hw,
343 ixgbe_link_speed *speed,
344 bool *link_up,
345 bool autoneg_wait_to_complete)
346{
347 u32 links_reg;
348
349 if (!(hw->mbx.ops.check_for_rst(hw))) {
350 *link_up = false;
351 *speed = 0;
352 return -1;
353 }
354
355 links_reg = IXGBE_READ_REG(hw, IXGBE_VFLINKS);
356
357 if (links_reg & IXGBE_LINKS_UP)
358 *link_up = true;
359 else
360 *link_up = false;
361
362 if (links_reg & IXGBE_LINKS_SPEED)
363 *speed = IXGBE_LINK_SPEED_10GB_FULL;
364 else
365 *speed = IXGBE_LINK_SPEED_1GB_FULL;
366
367 return 0;
368}
369
370struct ixgbe_mac_operations ixgbevf_mac_ops = {
371 .init_hw = ixgbevf_init_hw_vf,
372 .reset_hw = ixgbevf_reset_hw_vf,
373 .start_hw = ixgbevf_start_hw_vf,
374 .get_mac_addr = ixgbevf_get_mac_addr_vf,
375 .stop_adapter = ixgbevf_stop_hw_vf,
376 .setup_link = ixgbevf_setup_mac_link_vf,
377 .check_link = ixgbevf_check_mac_link_vf,
378 .set_rar = ixgbevf_set_rar_vf,
379 .update_mc_addr_list = ixgbevf_update_mc_addr_list_vf,
380 .set_vfta = ixgbevf_set_vfta_vf,
381};
382
383struct ixgbevf_info ixgbevf_vf_info = {
384 .mac = ixgbe_mac_82599_vf,
385 .mac_ops = &ixgbevf_mac_ops,
386};
387
diff --git a/drivers/net/ixgbevf/vf.h b/drivers/net/ixgbevf/vf.h
new file mode 100644
index 00000000000..799600e9270
--- /dev/null
+++ b/drivers/net/ixgbevf/vf.h
@@ -0,0 +1,168 @@
1/*******************************************************************************
2
3 Intel 82599 Virtual Function driver
4 Copyright(c) 1999 - 2009 Intel Corporation.
5
6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License,
8 version 2, as published by the Free Software Foundation.
9
10 This program is distributed in the hope it will be useful, but WITHOUT
11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 more details.
14
15 You should have received a copy of the GNU General Public License along with
16 this program; if not, write to the Free Software Foundation, Inc.,
17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18
19 The full GNU General Public License is included in this distribution in
20 the file called "COPYING".
21
22 Contact Information:
23 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
24 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
25
26*******************************************************************************/
27
28#ifndef __IXGBE_VF_H__
29#define __IXGBE_VF_H__
30
31#include <linux/pci.h>
32#include <linux/delay.h>
33#include <linux/interrupt.h>
34#include <linux/if_ether.h>
35
36#include "defines.h"
37#include "regs.h"
38#include "mbx.h"
39
40struct ixgbe_hw;
41
42/* iterator type for walking multicast address lists */
43typedef u8* (*ixgbe_mc_addr_itr) (struct ixgbe_hw *hw, u8 **mc_addr_ptr,
44 u32 *vmdq);
45struct ixgbe_mac_operations {
46 s32 (*init_hw)(struct ixgbe_hw *);
47 s32 (*reset_hw)(struct ixgbe_hw *);
48 s32 (*start_hw)(struct ixgbe_hw *);
49 s32 (*clear_hw_cntrs)(struct ixgbe_hw *);
50 enum ixgbe_media_type (*get_media_type)(struct ixgbe_hw *);
51 u32 (*get_supported_physical_layer)(struct ixgbe_hw *);
52 s32 (*get_mac_addr)(struct ixgbe_hw *, u8 *);
53 s32 (*stop_adapter)(struct ixgbe_hw *);
54 s32 (*get_bus_info)(struct ixgbe_hw *);
55
56 /* Link */
57 s32 (*setup_link)(struct ixgbe_hw *, ixgbe_link_speed, bool, bool);
58 s32 (*check_link)(struct ixgbe_hw *, ixgbe_link_speed *, bool *, bool);
59 s32 (*get_link_capabilities)(struct ixgbe_hw *, ixgbe_link_speed *,
60 bool *);
61
62 /* RAR, Multicast, VLAN */
63 s32 (*set_rar)(struct ixgbe_hw *, u32, u8 *, u32);
64 s32 (*init_rx_addrs)(struct ixgbe_hw *);
65 s32 (*update_mc_addr_list)(struct ixgbe_hw *, u8 *, u32,
66 ixgbe_mc_addr_itr);
67 s32 (*enable_mc)(struct ixgbe_hw *);
68 s32 (*disable_mc)(struct ixgbe_hw *);
69 s32 (*clear_vfta)(struct ixgbe_hw *);
70 s32 (*set_vfta)(struct ixgbe_hw *, u32, u32, bool);
71};
72
73enum ixgbe_mac_type {
74 ixgbe_mac_unknown = 0,
75 ixgbe_mac_82599_vf,
76 ixgbe_num_macs
77};
78
79struct ixgbe_mac_info {
80 struct ixgbe_mac_operations ops;
81 u8 addr[6];
82 u8 perm_addr[6];
83
84 enum ixgbe_mac_type type;
85
86 s32 mc_filter_type;
87
88 bool get_link_status;
89 u32 max_tx_queues;
90 u32 max_rx_queues;
91 u32 max_msix_vectors;
92};
93
94struct ixgbe_mbx_operations {
95 s32 (*init_params)(struct ixgbe_hw *hw);
96 s32 (*read)(struct ixgbe_hw *, u32 *, u16);
97 s32 (*write)(struct ixgbe_hw *, u32 *, u16);
98 s32 (*read_posted)(struct ixgbe_hw *, u32 *, u16);
99 s32 (*write_posted)(struct ixgbe_hw *, u32 *, u16);
100 s32 (*check_for_msg)(struct ixgbe_hw *);
101 s32 (*check_for_ack)(struct ixgbe_hw *);
102 s32 (*check_for_rst)(struct ixgbe_hw *);
103};
104
105struct ixgbe_mbx_stats {
106 u32 msgs_tx;
107 u32 msgs_rx;
108
109 u32 acks;
110 u32 reqs;
111 u32 rsts;
112};
113
114struct ixgbe_mbx_info {
115 struct ixgbe_mbx_operations ops;
116 struct ixgbe_mbx_stats stats;
117 u32 timeout;
118 u32 udelay;
119 u32 v2p_mailbox;
120 u16 size;
121};
122
123struct ixgbe_hw {
124 void *back;
125
126 u8 __iomem *hw_addr;
127 u8 *flash_address;
128 unsigned long io_base;
129
130 struct ixgbe_mac_info mac;
131 struct ixgbe_mbx_info mbx;
132
133 u16 device_id;
134 u16 subsystem_vendor_id;
135 u16 subsystem_device_id;
136 u16 vendor_id;
137
138 u8 revision_id;
139 bool adapter_stopped;
140};
141
142struct ixgbevf_hw_stats {
143 u64 base_vfgprc;
144 u64 base_vfgptc;
145 u64 base_vfgorc;
146 u64 base_vfgotc;
147 u64 base_vfmprc;
148
149 u64 last_vfgprc;
150 u64 last_vfgptc;
151 u64 last_vfgorc;
152 u64 last_vfgotc;
153 u64 last_vfmprc;
154
155 u64 vfgprc;
156 u64 vfgptc;
157 u64 vfgorc;
158 u64 vfgotc;
159 u64 vfmprc;
160};
161
162struct ixgbevf_info {
163 enum ixgbe_mac_type mac;
164 struct ixgbe_mac_operations *mac_ops;
165};
166
167#endif /* __IXGBE_VF_H__ */
168
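Everything in the VF driver reaches the hardware through these ops tables; call sites never name ixgbevf_check_mac_link_vf and friends directly. A minimal usage sketch, assuming an already initialised struct ixgbe_hw (the helper name below is illustrative):

	static s32 example_link_poll(struct ixgbe_hw *hw)
	{
		ixgbe_link_speed speed;
		bool link_up;

		/* dispatches to ixgbevf_check_mac_link_vf via ixgbevf_mac_ops */
		return hw->mac.ops.check_link(hw, &speed, &link_up, false);
	}
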
diff --git a/drivers/net/jme.c b/drivers/net/jme.c
index 792b88fc357..26eed49d320 100644
--- a/drivers/net/jme.c
+++ b/drivers/net/jme.c
@@ -2994,7 +2994,7 @@ jme_resume(struct pci_dev *pdev)
2994} 2994}
2995#endif 2995#endif
2996 2996
2997static struct pci_device_id jme_pci_tbl[] = { 2997static DEFINE_PCI_DEVICE_TABLE(jme_pci_tbl) = {
2998 { PCI_VDEVICE(JMICRON, PCI_DEVICE_ID_JMICRON_JMC250) }, 2998 { PCI_VDEVICE(JMICRON, PCI_DEVICE_ID_JMICRON_JMC250) },
2999 { PCI_VDEVICE(JMICRON, PCI_DEVICE_ID_JMICRON_JMC260) }, 2999 { PCI_VDEVICE(JMICRON, PCI_DEVICE_ID_JMICRON_JMC260) },
3000 { } 3000 { }
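Several hunks in this series (jme, mlx4, myri10ge, natsemi, ne2k-pci) convert open-coded pci_device_id arrays to DEFINE_PCI_DEVICE_TABLE(). The macro is roughly the following (a sketch of the <linux/pci.h> definition at this point in time, not quoted verbatim), so the table becomes const and lands in the __devinitconst section:

	#define DEFINE_PCI_DEVICE_TABLE(_table) \
		const struct pci_device_id _table[] __devinitconst
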
diff --git a/drivers/net/lib82596.c b/drivers/net/lib82596.c
index b117f7f8b19..b60efd4bd01 100644
--- a/drivers/net/lib82596.c
+++ b/drivers/net/lib82596.c
@@ -1094,11 +1094,9 @@ static int __devinit i82596_probe(struct net_device *dev)
1094 return i; 1094 return i;
1095 }; 1095 };
1096 1096
1097 DEB(DEB_PROBE, printk(KERN_INFO "%s: 82596 at %#3lx,", 1097 DEB(DEB_PROBE, printk(KERN_INFO "%s: 82596 at %#3lx, %pM IRQ %d.\n",
1098 dev->name, dev->base_addr)); 1098 dev->name, dev->base_addr, dev->dev_addr,
1099 for (i = 0; i < 6; i++) 1099 dev->irq));
1100 DEB(DEB_PROBE, printk(" %2.2X", dev->dev_addr[i]));
1101 DEB(DEB_PROBE, printk(" IRQ %d.\n", dev->irq));
1102 DEB(DEB_INIT, printk(KERN_INFO 1100 DEB(DEB_INIT, printk(KERN_INFO
1103 "%s: dma at 0x%p (%d bytes), lp->scb at 0x%p\n", 1101 "%s: dma at 0x%p (%d bytes), lp->scb at 0x%p\n",
1104 dev->name, dma, (int)sizeof(struct i596_dma), 1102 dev->name, dma, (int)sizeof(struct i596_dma),
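The lib82596 cleanup above relies on the kernel's %pM printk extension, which formats a 6-byte MAC address in a single conversion and replaces the old per-byte loop; for example (illustrative only):

	printk(KERN_INFO "%s: MAC %pM, IRQ %d.\n", dev->name, dev->dev_addr, dev->irq);
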
diff --git a/drivers/net/mac8390.c b/drivers/net/mac8390.c
index f8fa0c3f0f6..a8768672dc5 100644
--- a/drivers/net/mac8390.c
+++ b/drivers/net/mac8390.c
@@ -17,6 +17,8 @@
17/* 2002-12-30: Try to support more cards, some clues from NetBSD driver */ 17/* 2002-12-30: Try to support more cards, some clues from NetBSD driver */
18/* 2003-12-26: Make sure Asante cards always work. */ 18/* 2003-12-26: Make sure Asante cards always work. */
19 19
20#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
21
20#include <linux/module.h> 22#include <linux/module.h>
21#include <linux/kernel.h> 23#include <linux/kernel.h>
22#include <linux/types.h> 24#include <linux/types.h>
@@ -34,31 +36,36 @@
34#include <linux/etherdevice.h> 36#include <linux/etherdevice.h>
35#include <linux/skbuff.h> 37#include <linux/skbuff.h>
36#include <linux/bitops.h> 38#include <linux/bitops.h>
39#include <linux/io.h>
37 40
38#include <asm/system.h> 41#include <asm/system.h>
39#include <asm/io.h>
40#include <asm/dma.h> 42#include <asm/dma.h>
41#include <asm/hwtest.h> 43#include <asm/hwtest.h>
42#include <asm/macints.h> 44#include <asm/macints.h>
43 45
44static char version[] = 46static char version[] =
45 "mac8390.c: v0.4 2001-05-15 David Huggins-Daines <dhd@debian.org> and others\n"; 47 "v0.4 2001-05-15 David Huggins-Daines <dhd@debian.org> and others\n";
46 48
47#define EI_SHIFT(x) (ei_local->reg_offset[x]) 49#define EI_SHIFT(x) (ei_local->reg_offset[x])
48#define ei_inb(port) in_8(port) 50#define ei_inb(port) in_8(port)
49#define ei_outb(val,port) out_8(port,val) 51#define ei_outb(val, port) out_8(port, val)
50#define ei_inb_p(port) in_8(port) 52#define ei_inb_p(port) in_8(port)
51#define ei_outb_p(val,port) out_8(port,val) 53#define ei_outb_p(val, port) out_8(port, val)
52 54
53#include "lib8390.c" 55#include "lib8390.c"
54 56
55#define WD_START_PG 0x00 /* First page of TX buffer */ 57#define WD_START_PG 0x00 /* First page of TX buffer */
56#define CABLETRON_RX_START_PG 0x00 /* First page of RX buffer */ 58#define CABLETRON_RX_START_PG 0x00 /* First page of RX buffer */
57#define CABLETRON_RX_STOP_PG 0x30 /* Last page +1 of RX ring */ 59#define CABLETRON_RX_STOP_PG 0x30 /* Last page +1 of RX ring */
58#define CABLETRON_TX_START_PG CABLETRON_RX_STOP_PG /* First page of TX buffer */ 60#define CABLETRON_TX_START_PG CABLETRON_RX_STOP_PG
61 /* First page of TX buffer */
59 62
60/* Unfortunately it seems we have to hardcode these for the moment */ 63/*
61/* Shouldn't the card know about this? Does anyone know where to read it off the card? Do we trust the data provided by the card? */ 64 * Unfortunately it seems we have to hardcode these for the moment
65 * Shouldn't the card know about this?
66 * Does anyone know where to read it off the card?
67 * Do we trust the data provided by the card?
68 */
62 69
63#define DAYNA_8390_BASE 0x80000 70#define DAYNA_8390_BASE 0x80000
64#define DAYNA_8390_MEM 0x00000 71#define DAYNA_8390_MEM 0x00000
@@ -80,7 +87,7 @@ enum mac8390_type {
80 MAC8390_KINETICS, 87 MAC8390_KINETICS,
81}; 88};
82 89
83static const char * cardname[] = { 90static const char *cardname[] = {
84 "apple", 91 "apple",
85 "asante", 92 "asante",
86 "farallon", 93 "farallon",
@@ -90,7 +97,7 @@ static const char * cardname[] = {
90 "kinetics", 97 "kinetics",
91}; 98};
92 99
93static int word16[] = { 100static const int word16[] = {
94 1, /* apple */ 101 1, /* apple */
95 1, /* asante */ 102 1, /* asante */
96 1, /* farallon */ 103 1, /* farallon */
@@ -101,7 +108,7 @@ static int word16[] = {
101}; 108};
102 109
103/* on which cards do we use NuBus resources? */ 110/* on which cards do we use NuBus resources? */
104static int useresources[] = { 111static const int useresources[] = {
105 1, /* apple */ 112 1, /* apple */
106 1, /* asante */ 113 1, /* asante */
107 1, /* farallon */ 114 1, /* farallon */
@@ -117,22 +124,22 @@ enum mac8390_access {
117 ACCESS_16, 124 ACCESS_16,
118}; 125};
119 126
120extern int mac8390_memtest(struct net_device * dev); 127extern int mac8390_memtest(struct net_device *dev);
121static int mac8390_initdev(struct net_device * dev, struct nubus_dev * ndev, 128static int mac8390_initdev(struct net_device *dev, struct nubus_dev *ndev,
122 enum mac8390_type type); 129 enum mac8390_type type);
123 130
124static int mac8390_open(struct net_device * dev); 131static int mac8390_open(struct net_device *dev);
125static int mac8390_close(struct net_device * dev); 132static int mac8390_close(struct net_device *dev);
126static void mac8390_no_reset(struct net_device *dev); 133static void mac8390_no_reset(struct net_device *dev);
127static void interlan_reset(struct net_device *dev); 134static void interlan_reset(struct net_device *dev);
128 135
129/* Sane (32-bit chunk memory read/write) - Some Farallon and Apple do this*/ 136/* Sane (32-bit chunk memory read/write) - Some Farallon and Apple do this*/
130static void sane_get_8390_hdr(struct net_device *dev, 137static void sane_get_8390_hdr(struct net_device *dev,
131 struct e8390_pkt_hdr *hdr, int ring_page); 138 struct e8390_pkt_hdr *hdr, int ring_page);
132static void sane_block_input(struct net_device * dev, int count, 139static void sane_block_input(struct net_device *dev, int count,
133 struct sk_buff * skb, int ring_offset); 140 struct sk_buff *skb, int ring_offset);
134static void sane_block_output(struct net_device * dev, int count, 141static void sane_block_output(struct net_device *dev, int count,
135 const unsigned char * buf, const int start_page); 142 const unsigned char *buf, const int start_page);
136 143
137/* dayna_memcpy to and from card */ 144/* dayna_memcpy to and from card */
138static void dayna_memcpy_fromcard(struct net_device *dev, void *to, 145static void dayna_memcpy_fromcard(struct net_device *dev, void *to,
@@ -148,8 +155,8 @@ static void dayna_block_input(struct net_device *dev, int count,
148static void dayna_block_output(struct net_device *dev, int count, 155static void dayna_block_output(struct net_device *dev, int count,
149 const unsigned char *buf, int start_page); 156 const unsigned char *buf, int start_page);
150 157
151#define memcpy_fromio(a,b,c) memcpy((a),(void *)(b),(c)) 158#define memcpy_fromio(a, b, c) memcpy((a), (void *)(b), (c))
152#define memcpy_toio(a,b,c) memcpy((void *)(a),(b),(c)) 159#define memcpy_toio(a, b, c) memcpy((void *)(a), (b), (c))
153 160
154/* Slow Sane (16-bit chunk memory read/write) Cabletron uses this */ 161/* Slow Sane (16-bit chunk memory read/write) Cabletron uses this */
155static void slow_sane_get_8390_hdr(struct net_device *dev, 162static void slow_sane_get_8390_hdr(struct net_device *dev,
@@ -164,70 +171,72 @@ static void word_memcpy_fromcard(void *tp, const void *fp, int count);
164static enum mac8390_type __init mac8390_ident(struct nubus_dev *dev) 171static enum mac8390_type __init mac8390_ident(struct nubus_dev *dev)
165{ 172{
166 switch (dev->dr_sw) { 173 switch (dev->dr_sw) {
167 case NUBUS_DRSW_3COM: 174 case NUBUS_DRSW_3COM:
168 switch (dev->dr_hw) { 175 switch (dev->dr_hw) {
169 case NUBUS_DRHW_APPLE_SONIC_NB: 176 case NUBUS_DRHW_APPLE_SONIC_NB:
170 case NUBUS_DRHW_APPLE_SONIC_LC: 177 case NUBUS_DRHW_APPLE_SONIC_LC:
171 case NUBUS_DRHW_SONNET: 178 case NUBUS_DRHW_SONNET:
172 return MAC8390_NONE; 179 return MAC8390_NONE;
173 break;
174 default:
175 return MAC8390_APPLE;
176 break;
177 }
178 break; 180 break;
179 181 default:
180 case NUBUS_DRSW_APPLE: 182 return MAC8390_APPLE;
181 switch (dev->dr_hw) {
182 case NUBUS_DRHW_ASANTE_LC:
183 return MAC8390_NONE;
184 break;
185 case NUBUS_DRHW_CABLETRON:
186 return MAC8390_CABLETRON;
187 break;
188 default:
189 return MAC8390_APPLE;
190 break;
191 }
192 break; 183 break;
184 }
185 break;
193 186
194 case NUBUS_DRSW_ASANTE: 187 case NUBUS_DRSW_APPLE:
195 return MAC8390_ASANTE; 188 switch (dev->dr_hw) {
189 case NUBUS_DRHW_ASANTE_LC:
190 return MAC8390_NONE;
196 break; 191 break;
197 192 case NUBUS_DRHW_CABLETRON:
198 case NUBUS_DRSW_TECHWORKS: 193 return MAC8390_CABLETRON;
199 case NUBUS_DRSW_DAYNA2:
200 case NUBUS_DRSW_DAYNA_LC:
201 if (dev->dr_hw == NUBUS_DRHW_CABLETRON)
202 return MAC8390_CABLETRON;
203 else
204 return MAC8390_APPLE;
205 break; 194 break;
206 195 default:
207 case NUBUS_DRSW_FARALLON: 196 return MAC8390_APPLE;
208 return MAC8390_FARALLON;
209 break; 197 break;
198 }
199 break;
210 200
211 case NUBUS_DRSW_KINETICS: 201 case NUBUS_DRSW_ASANTE:
212 switch (dev->dr_hw) { 202 return MAC8390_ASANTE;
213 case NUBUS_DRHW_INTERLAN: 203 break;
214 return MAC8390_INTERLAN;
215 break;
216 default:
217 return MAC8390_KINETICS;
218 break;
219 }
220 break;
221 204
222 case NUBUS_DRSW_DAYNA: 205 case NUBUS_DRSW_TECHWORKS:
223 // These correspond to Dayna Sonic cards 206 case NUBUS_DRSW_DAYNA2:
224 // which use the macsonic driver 207 case NUBUS_DRSW_DAYNA_LC:
225 if (dev->dr_hw == NUBUS_DRHW_SMC9194 || 208 if (dev->dr_hw == NUBUS_DRHW_CABLETRON)
226 dev->dr_hw == NUBUS_DRHW_INTERLAN ) 209 return MAC8390_CABLETRON;
227 return MAC8390_NONE; 210 else
228 else 211 return MAC8390_APPLE;
229 return MAC8390_DAYNA; 212 break;
213
214 case NUBUS_DRSW_FARALLON:
215 return MAC8390_FARALLON;
216 break;
217
218 case NUBUS_DRSW_KINETICS:
219 switch (dev->dr_hw) {
220 case NUBUS_DRHW_INTERLAN:
221 return MAC8390_INTERLAN;
222 break;
223 default:
224 return MAC8390_KINETICS;
230 break; 225 break;
226 }
227 break;
228
229 case NUBUS_DRSW_DAYNA:
230 /*
231 * These correspond to Dayna Sonic cards
232 * which use the macsonic driver
233 */
234 if (dev->dr_hw == NUBUS_DRHW_SMC9194 ||
235 dev->dr_hw == NUBUS_DRHW_INTERLAN)
236 return MAC8390_NONE;
237 else
238 return MAC8390_DAYNA;
239 break;
231 } 240 }
232 return MAC8390_NONE; 241 return MAC8390_NONE;
233} 242}
@@ -237,14 +246,14 @@ static enum mac8390_access __init mac8390_testio(volatile unsigned long membase)
237 unsigned long outdata = 0xA5A0B5B0; 246 unsigned long outdata = 0xA5A0B5B0;
238 unsigned long indata = 0x00000000; 247 unsigned long indata = 0x00000000;
239 /* Try writing 32 bits */ 248 /* Try writing 32 bits */
240 memcpy((char *)membase, (char *)&outdata, 4); 249 memcpy(membase, &outdata, 4);
241 /* Now compare them */ 250 /* Now compare them */
242 if (memcmp((char *)&outdata, (char *)membase, 4) == 0) 251 if (memcmp((char *)&outdata, (char *)membase, 4) == 0)
243 return ACCESS_32; 252 return ACCESS_32;
244 /* Write 16 bit output */ 253 /* Write 16 bit output */
245 word_memcpy_tocard((char *)membase, (char *)&outdata, 4); 254 word_memcpy_tocard(membase, &outdata, 4);
246 /* Now read it back */ 255 /* Now read it back */
247 word_memcpy_fromcard((char *)&indata, (char *)membase, 4); 256 word_memcpy_fromcard(&indata, membase, 4);
248 if (outdata == indata) 257 if (outdata == indata)
249 return ACCESS_16; 258 return ACCESS_16;
250 return ACCESS_UNKNOWN; 259 return ACCESS_UNKNOWN;
@@ -258,7 +267,7 @@ static int __init mac8390_memsize(unsigned long membase)
258 local_irq_save(flags); 267 local_irq_save(flags);
259 /* Check up to 32K in 4K increments */ 268 /* Check up to 32K in 4K increments */
260 for (i = 0; i < 8; i++) { 269 for (i = 0; i < 8; i++) {
261 volatile unsigned short *m = (unsigned short *) (membase + (i * 0x1000)); 270 volatile unsigned short *m = (unsigned short *)(membase + (i * 0x1000));
262 271
263 /* Unwriteable - we have a fully decoded card and the 272 /* Unwriteable - we have a fully decoded card and the
264 RAM end located */ 273 RAM end located */
@@ -273,28 +282,127 @@ static int __init mac8390_memsize(unsigned long membase)
273 282
274 /* check for partial decode and wrap */ 283 /* check for partial decode and wrap */
275 for (j = 0; j < i; j++) { 284 for (j = 0; j < i; j++) {
276 volatile unsigned short *p = (unsigned short *) (membase + (j * 0x1000)); 285 volatile unsigned short *p = (unsigned short *)(membase + (j * 0x1000));
277 if (*p != (0xA5A0 | j)) 286 if (*p != (0xA5A0 | j))
278 break; 287 break;
279 } 288 }
280 } 289 }
281 local_irq_restore(flags); 290 local_irq_restore(flags);
282 /* in any case, we stopped once we tried one block too many, 291 /*
283 or once we reached 32K */ 292 * in any case, we stopped once we tried one block too many,
284 return i * 0x1000; 293 * or once we reached 32K
294 */
295 return i * 0x1000;
296}
297
298static bool __init mac8390_init(struct net_device *dev, struct nubus_dev *ndev,
299 enum mac8390_type cardtype)
300{
301 struct nubus_dir dir;
302 struct nubus_dirent ent;
303 int offset;
304 volatile unsigned short *i;
305
306 printk_once(KERN_INFO pr_fmt("%s"), version);
307
308 dev->irq = SLOT2IRQ(ndev->board->slot);
309 /* This is getting to be a habit */
310 dev->base_addr = (ndev->board->slot_addr |
311 ((ndev->board->slot & 0xf) << 20));
312
313 /*
314 * Get some Nubus info - we will trust the card's idea
315 * of where its memory and registers are.
316 */
317
318 if (nubus_get_func_dir(ndev, &dir) == -1) {
319 pr_err("%s: Unable to get Nubus functional directory for slot %X!\n",
320 dev->name, ndev->board->slot);
321 return false;
322 }
323
324 /* Get the MAC address */
325 if (nubus_find_rsrc(&dir, NUBUS_RESID_MAC_ADDRESS, &ent) == -1) {
326 pr_info("%s: Couldn't get MAC address!\n", dev->name);
327 return false;
328 }
329
330 nubus_get_rsrc_mem(dev->dev_addr, &ent, 6);
331
332 if (useresources[cardtype] == 1) {
333 nubus_rewinddir(&dir);
334 if (nubus_find_rsrc(&dir, NUBUS_RESID_MINOR_BASEOS,
335 &ent) == -1) {
336 pr_err("%s: Memory offset resource for slot %X not found!\n",
337 dev->name, ndev->board->slot);
338 return false;
339 }
340 nubus_get_rsrc_mem(&offset, &ent, 4);
341 dev->mem_start = dev->base_addr + offset;
342 /* yes, this is how the Apple driver does it */
343 dev->base_addr = dev->mem_start + 0x10000;
344 nubus_rewinddir(&dir);
345 if (nubus_find_rsrc(&dir, NUBUS_RESID_MINOR_LENGTH,
346 &ent) == -1) {
347 pr_info("%s: Memory length resource for slot %X not found, probing\n",
348 dev->name, ndev->board->slot);
349 offset = mac8390_memsize(dev->mem_start);
350 } else {
351 nubus_get_rsrc_mem(&offset, &ent, 4);
352 }
353 dev->mem_end = dev->mem_start + offset;
354 } else {
355 switch (cardtype) {
356 case MAC8390_KINETICS:
357 case MAC8390_DAYNA: /* it's the same */
358 dev->base_addr = (int)(ndev->board->slot_addr +
359 DAYNA_8390_BASE);
360 dev->mem_start = (int)(ndev->board->slot_addr +
361 DAYNA_8390_MEM);
362 dev->mem_end = dev->mem_start +
363 mac8390_memsize(dev->mem_start);
364 break;
365 case MAC8390_INTERLAN:
366 dev->base_addr = (int)(ndev->board->slot_addr +
367 INTERLAN_8390_BASE);
368 dev->mem_start = (int)(ndev->board->slot_addr +
369 INTERLAN_8390_MEM);
370 dev->mem_end = dev->mem_start +
371 mac8390_memsize(dev->mem_start);
372 break;
373 case MAC8390_CABLETRON:
374 dev->base_addr = (int)(ndev->board->slot_addr +
375 CABLETRON_8390_BASE);
376 dev->mem_start = (int)(ndev->board->slot_addr +
377 CABLETRON_8390_MEM);
378 /* The base address is unreadable if 0x00
379 * has been written to the command register
380 * Reset the chip by writing E8390_NODMA +
381 * E8390_PAGE0 + E8390_STOP just to be
382 * sure
383 */
384 i = (void *)dev->base_addr;
385 *i = 0x21;
386 dev->mem_end = dev->mem_start +
387 mac8390_memsize(dev->mem_start);
388 break;
389
390 default:
391 pr_err("Card type %s is unsupported, sorry\n",
392 ndev->board->name);
393 return false;
394 }
395 }
396
397 return true;
285} 398}
286 399
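The Cabletron branch of mac8390_init() above resets the 8390 by storing 0x21 into the command register; that value is just the OR of the standard command bits (assuming the usual 8390.h definitions), so the magic number could equally be written as:

	/* E8390_NODMA (0x20) | E8390_PAGE0 (0x00) | E8390_STOP (0x01) == 0x21 */
	*i = E8390_NODMA | E8390_PAGE0 | E8390_STOP;
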
287struct net_device * __init mac8390_probe(int unit) 400struct net_device * __init mac8390_probe(int unit)
288{ 401{
289 struct net_device *dev; 402 struct net_device *dev;
290 volatile unsigned short *i; 403 struct nubus_dev *ndev = NULL;
291 int version_disp = 0;
292 struct nubus_dev * ndev = NULL;
293 int err = -ENODEV; 404 int err = -ENODEV;
294 405
295 struct nubus_dir dir;
296 struct nubus_dirent ent;
297 int offset;
298 static unsigned int slots; 406 static unsigned int slots;
299 407
300 enum mac8390_type cardtype; 408 enum mac8390_type cardtype;
@@ -311,118 +419,19 @@ struct net_device * __init mac8390_probe(int unit)
311 if (unit >= 0) 419 if (unit >= 0)
312 sprintf(dev->name, "eth%d", unit); 420 sprintf(dev->name, "eth%d", unit);
313 421
314 while ((ndev = nubus_find_type(NUBUS_CAT_NETWORK, NUBUS_TYPE_ETHERNET, ndev))) { 422 while ((ndev = nubus_find_type(NUBUS_CAT_NETWORK, NUBUS_TYPE_ETHERNET,
423 ndev))) {
315 /* Have we seen it already? */ 424 /* Have we seen it already? */
316 if (slots & (1<<ndev->board->slot)) 425 if (slots & (1 << ndev->board->slot))
317 continue; 426 continue;
318 slots |= 1<<ndev->board->slot; 427 slots |= 1 << ndev->board->slot;
319 428
320 if ((cardtype = mac8390_ident(ndev)) == MAC8390_NONE) 429 cardtype = mac8390_ident(ndev);
430 if (cardtype == MAC8390_NONE)
321 continue; 431 continue;
322 432
323 if (version_disp == 0) { 433 if (!mac8390_init(dev, ndev, cardtype))
324 version_disp = 1;
325 printk(version);
326 }
327
328 dev->irq = SLOT2IRQ(ndev->board->slot);
329 /* This is getting to be a habit */
330 dev->base_addr = ndev->board->slot_addr | ((ndev->board->slot&0xf) << 20);
331
332 /* Get some Nubus info - we will trust the card's idea
333 of where its memory and registers are. */
334
335 if (nubus_get_func_dir(ndev, &dir) == -1) {
336 printk(KERN_ERR "%s: Unable to get Nubus functional"
337 " directory for slot %X!\n",
338 dev->name, ndev->board->slot);
339 continue; 434 continue;
340 }
341
342 /* Get the MAC address */
343 if ((nubus_find_rsrc(&dir, NUBUS_RESID_MAC_ADDRESS, &ent)) == -1) {
344 printk(KERN_INFO "%s: Couldn't get MAC address!\n",
345 dev->name);
346 continue;
347 } else {
348 nubus_get_rsrc_mem(dev->dev_addr, &ent, 6);
349 }
350
351 if (useresources[cardtype] == 1) {
352 nubus_rewinddir(&dir);
353 if (nubus_find_rsrc(&dir, NUBUS_RESID_MINOR_BASEOS, &ent) == -1) {
354 printk(KERN_ERR "%s: Memory offset resource"
355 " for slot %X not found!\n",
356 dev->name, ndev->board->slot);
357 continue;
358 }
359 nubus_get_rsrc_mem(&offset, &ent, 4);
360 dev->mem_start = dev->base_addr + offset;
361 /* yes, this is how the Apple driver does it */
362 dev->base_addr = dev->mem_start + 0x10000;
363 nubus_rewinddir(&dir);
364 if (nubus_find_rsrc(&dir, NUBUS_RESID_MINOR_LENGTH, &ent) == -1) {
365 printk(KERN_INFO "%s: Memory length resource"
366 " for slot %X not found"
367 ", probing\n",
368 dev->name, ndev->board->slot);
369 offset = mac8390_memsize(dev->mem_start);
370 } else {
371 nubus_get_rsrc_mem(&offset, &ent, 4);
372 }
373 dev->mem_end = dev->mem_start + offset;
374 } else {
375 switch (cardtype) {
376 case MAC8390_KINETICS:
377 case MAC8390_DAYNA: /* it's the same */
378 dev->base_addr =
379 (int)(ndev->board->slot_addr +
380 DAYNA_8390_BASE);
381 dev->mem_start =
382 (int)(ndev->board->slot_addr +
383 DAYNA_8390_MEM);
384 dev->mem_end =
385 dev->mem_start +
386 mac8390_memsize(dev->mem_start);
387 break;
388 case MAC8390_INTERLAN:
389 dev->base_addr =
390 (int)(ndev->board->slot_addr +
391 INTERLAN_8390_BASE);
392 dev->mem_start =
393 (int)(ndev->board->slot_addr +
394 INTERLAN_8390_MEM);
395 dev->mem_end =
396 dev->mem_start +
397 mac8390_memsize(dev->mem_start);
398 break;
399 case MAC8390_CABLETRON:
400 dev->base_addr =
401 (int)(ndev->board->slot_addr +
402 CABLETRON_8390_BASE);
403 dev->mem_start =
404 (int)(ndev->board->slot_addr +
405 CABLETRON_8390_MEM);
406 /* The base address is unreadable if 0x00
407 * has been written to the command register
408 * Reset the chip by writing E8390_NODMA +
409 * E8390_PAGE0 + E8390_STOP just to be
410 * sure
411 */
412 i = (void *)dev->base_addr;
413 *i = 0x21;
414 dev->mem_end =
415 dev->mem_start +
416 mac8390_memsize(dev->mem_start);
417 break;
418
419 default:
420 printk(KERN_ERR "Card type %s is"
421 " unsupported, sorry\n",
422 ndev->board->name);
423 continue;
424 }
425 }
426 435
427 /* Do the nasty 8390 stuff */ 436 /* Do the nasty 8390 stuff */
428 if (!mac8390_initdev(dev, ndev, cardtype)) 437 if (!mac8390_initdev(dev, ndev, cardtype))
@@ -458,7 +467,7 @@ int init_module(void)
458 dev_mac890[i] = dev; 467 dev_mac890[i] = dev;
459 } 468 }
460 if (!i) { 469 if (!i) {
461 printk(KERN_NOTICE "mac8390.c: No useable cards found, driver NOT installed.\n"); 470 pr_notice("No useable cards found, driver NOT installed.\n");
462 return -ENODEV; 471 return -ENODEV;
463 } 472 }
464 return 0; 473 return 0;
@@ -493,22 +502,23 @@ static const struct net_device_ops mac8390_netdev_ops = {
493#endif 502#endif
494}; 503};
495 504
496static int __init mac8390_initdev(struct net_device * dev, struct nubus_dev * ndev, 505static int __init mac8390_initdev(struct net_device *dev,
497 enum mac8390_type type) 506 struct nubus_dev *ndev,
507 enum mac8390_type type)
498{ 508{
499 static u32 fwrd4_offsets[16]={ 509 static u32 fwrd4_offsets[16] = {
500 0, 4, 8, 12, 510 0, 4, 8, 12,
501 16, 20, 24, 28, 511 16, 20, 24, 28,
502 32, 36, 40, 44, 512 32, 36, 40, 44,
503 48, 52, 56, 60 513 48, 52, 56, 60
504 }; 514 };
505 static u32 back4_offsets[16]={ 515 static u32 back4_offsets[16] = {
506 60, 56, 52, 48, 516 60, 56, 52, 48,
507 44, 40, 36, 32, 517 44, 40, 36, 32,
508 28, 24, 20, 16, 518 28, 24, 20, 16,
509 12, 8, 4, 0 519 12, 8, 4, 0
510 }; 520 };
511 static u32 fwrd2_offsets[16]={ 521 static u32 fwrd2_offsets[16] = {
512 0, 2, 4, 6, 522 0, 2, 4, 6,
513 8, 10, 12, 14, 523 8, 10, 12, 14,
514 16, 18, 20, 22, 524 16, 18, 20, 22,
@@ -526,47 +536,47 @@ static int __init mac8390_initdev(struct net_device * dev, struct nubus_dev * nd
526 536
527 /* Cabletron's TX/RX buffers are backwards */ 537 /* Cabletron's TX/RX buffers are backwards */
528 if (type == MAC8390_CABLETRON) { 538 if (type == MAC8390_CABLETRON) {
529 ei_status.tx_start_page = CABLETRON_TX_START_PG; 539 ei_status.tx_start_page = CABLETRON_TX_START_PG;
530 ei_status.rx_start_page = CABLETRON_RX_START_PG; 540 ei_status.rx_start_page = CABLETRON_RX_START_PG;
531 ei_status.stop_page = CABLETRON_RX_STOP_PG; 541 ei_status.stop_page = CABLETRON_RX_STOP_PG;
532 ei_status.rmem_start = dev->mem_start; 542 ei_status.rmem_start = dev->mem_start;
533 ei_status.rmem_end = dev->mem_start + CABLETRON_RX_STOP_PG*256; 543 ei_status.rmem_end = dev->mem_start + CABLETRON_RX_STOP_PG*256;
534 } else { 544 } else {
535 ei_status.tx_start_page = WD_START_PG; 545 ei_status.tx_start_page = WD_START_PG;
536 ei_status.rx_start_page = WD_START_PG + TX_PAGES; 546 ei_status.rx_start_page = WD_START_PG + TX_PAGES;
537 ei_status.stop_page = (dev->mem_end - dev->mem_start)/256; 547 ei_status.stop_page = (dev->mem_end - dev->mem_start)/256;
538 ei_status.rmem_start = dev->mem_start + TX_PAGES*256; 548 ei_status.rmem_start = dev->mem_start + TX_PAGES*256;
539 ei_status.rmem_end = dev->mem_end; 549 ei_status.rmem_end = dev->mem_end;
540 } 550 }
541 551
542 /* Fill in model-specific information and functions */ 552 /* Fill in model-specific information and functions */
543 switch(type) { 553 switch (type) {
544 case MAC8390_FARALLON: 554 case MAC8390_FARALLON:
545 case MAC8390_APPLE: 555 case MAC8390_APPLE:
546 switch(mac8390_testio(dev->mem_start)) { 556 switch (mac8390_testio(dev->mem_start)) {
547 case ACCESS_UNKNOWN: 557 case ACCESS_UNKNOWN:
548 printk("Don't know how to access card memory!\n"); 558 pr_info("Don't know how to access card memory!\n");
549 return -ENODEV; 559 return -ENODEV;
550 break; 560 break;
551 561
552 case ACCESS_16: 562 case ACCESS_16:
553 /* 16 bit card, register map is reversed */ 563 /* 16 bit card, register map is reversed */
554 ei_status.reset_8390 = &mac8390_no_reset; 564 ei_status.reset_8390 = &mac8390_no_reset;
555 ei_status.block_input = &slow_sane_block_input; 565 ei_status.block_input = &slow_sane_block_input;
556 ei_status.block_output = &slow_sane_block_output; 566 ei_status.block_output = &slow_sane_block_output;
557 ei_status.get_8390_hdr = &slow_sane_get_8390_hdr; 567 ei_status.get_8390_hdr = &slow_sane_get_8390_hdr;
558 ei_status.reg_offset = back4_offsets; 568 ei_status.reg_offset = back4_offsets;
559 break; 569 break;
560 570
561 case ACCESS_32: 571 case ACCESS_32:
562 /* 32 bit card, register map is reversed */ 572 /* 32 bit card, register map is reversed */
563 ei_status.reset_8390 = &mac8390_no_reset; 573 ei_status.reset_8390 = &mac8390_no_reset;
564 ei_status.block_input = &sane_block_input; 574 ei_status.block_input = &sane_block_input;
565 ei_status.block_output = &sane_block_output; 575 ei_status.block_output = &sane_block_output;
566 ei_status.get_8390_hdr = &sane_get_8390_hdr; 576 ei_status.get_8390_hdr = &sane_get_8390_hdr;
567 ei_status.reg_offset = back4_offsets; 577 ei_status.reg_offset = back4_offsets;
568 access_bitmode = 1; 578 access_bitmode = 1;
569 break; 579 break;
570 } 580 }
571 break; 581 break;
572 582
@@ -608,24 +618,25 @@ static int __init mac8390_initdev(struct net_device * dev, struct nubus_dev * nd
608 ei_status.block_input = &slow_sane_block_input; 618 ei_status.block_input = &slow_sane_block_input;
609 ei_status.block_output = &slow_sane_block_output; 619 ei_status.block_output = &slow_sane_block_output;
610 ei_status.get_8390_hdr = &slow_sane_get_8390_hdr; 620 ei_status.get_8390_hdr = &slow_sane_get_8390_hdr;
611 ei_status.reg_offset = fwrd4_offsets; 621 ei_status.reg_offset = fwrd4_offsets;
612 break; 622 break;
613 623
614 default: 624 default:
615 printk(KERN_ERR "Card type %s is unsupported, sorry\n", ndev->board->name); 625 pr_err("Card type %s is unsupported, sorry\n",
626 ndev->board->name);
616 return -ENODEV; 627 return -ENODEV;
617 } 628 }
618 629
619 __NS8390_init(dev, 0); 630 __NS8390_init(dev, 0);
620 631
621 /* Good, done, now spit out some messages */ 632 /* Good, done, now spit out some messages */
622 printk(KERN_INFO "%s: %s in slot %X (type %s)\n", 633 pr_info("%s: %s in slot %X (type %s)\n",
623 dev->name, ndev->board->name, ndev->board->slot, cardname[type]); 634 dev->name, ndev->board->name, ndev->board->slot,
624 printk(KERN_INFO 635 cardname[type]);
625 "MAC %pM IRQ %d, %d KB shared memory at %#lx, %d-bit access.\n", 636 pr_info("MAC %pM IRQ %d, %d KB shared memory at %#lx, %d-bit access.\n",
626 dev->dev_addr, dev->irq, 637 dev->dev_addr, dev->irq,
627 (unsigned int)(dev->mem_end - dev->mem_start) >> 10, 638 (unsigned int)(dev->mem_end - dev->mem_start) >> 10,
628 dev->mem_start, access_bitmode ? 32 : 16); 639 dev->mem_start, access_bitmode ? 32 : 16);
629 return 0; 640 return 0;
630} 641}
631 642
@@ -633,7 +644,7 @@ static int mac8390_open(struct net_device *dev)
633{ 644{
634 __ei_open(dev); 645 __ei_open(dev);
635 if (request_irq(dev->irq, __ei_interrupt, 0, "8390 Ethernet", dev)) { 646 if (request_irq(dev->irq, __ei_interrupt, 0, "8390 Ethernet", dev)) {
636 printk ("%s: unable to get IRQ %d.\n", dev->name, dev->irq); 647 pr_info("%s: unable to get IRQ %d.\n", dev->name, dev->irq);
637 return -EAGAIN; 648 return -EAGAIN;
638 } 649 }
639 return 0; 650 return 0;
@@ -650,72 +661,71 @@ static void mac8390_no_reset(struct net_device *dev)
650{ 661{
651 ei_status.txing = 0; 662 ei_status.txing = 0;
652 if (ei_debug > 1) 663 if (ei_debug > 1)
653 printk("reset not supported\n"); 664 pr_info("reset not supported\n");
654 return; 665 return;
655} 666}
656 667
657static void interlan_reset(struct net_device *dev) 668static void interlan_reset(struct net_device *dev)
658{ 669{
659 unsigned char *target=nubus_slot_addr(IRQ2SLOT(dev->irq)); 670 unsigned char *target = nubus_slot_addr(IRQ2SLOT(dev->irq));
660 if (ei_debug > 1) 671 if (ei_debug > 1)
661 printk("Need to reset the NS8390 t=%lu...", jiffies); 672 pr_info("Need to reset the NS8390 t=%lu...", jiffies);
662 ei_status.txing = 0; 673 ei_status.txing = 0;
663 target[0xC0000] = 0; 674 target[0xC0000] = 0;
664 if (ei_debug > 1) 675 if (ei_debug > 1)
665 printk("reset complete\n"); 676 pr_cont("reset complete\n");
666 return; 677 return;
667} 678}
668 679
669/* dayna_memcpy_fromio/dayna_memcpy_toio */ 680/* dayna_memcpy_fromio/dayna_memcpy_toio */
670/* directly from daynaport.c by Alan Cox */ 681/* directly from daynaport.c by Alan Cox */
671static void dayna_memcpy_fromcard(struct net_device *dev, void *to, int from, int count) 682static void dayna_memcpy_fromcard(struct net_device *dev, void *to, int from,
683 int count)
672{ 684{
673 volatile unsigned char *ptr; 685 volatile unsigned char *ptr;
674 unsigned char *target=to; 686 unsigned char *target = to;
675 from<<=1; /* word, skip overhead */ 687 from <<= 1; /* word, skip overhead */
676 ptr=(unsigned char *)(dev->mem_start+from); 688 ptr = (unsigned char *)(dev->mem_start+from);
677 /* Leading byte? */ 689 /* Leading byte? */
678 if (from&2) { 690 if (from & 2) {
679 *target++ = ptr[-1]; 691 *target++ = ptr[-1];
680 ptr += 2; 692 ptr += 2;
681 count--; 693 count--;
682 } 694 }
683 while(count>=2) 695 while (count >= 2) {
684 {
685 *(unsigned short *)target = *(unsigned short volatile *)ptr; 696 *(unsigned short *)target = *(unsigned short volatile *)ptr;
686 ptr += 4; /* skip cruft */ 697 ptr += 4; /* skip cruft */
687 target += 2; 698 target += 2;
688 count-=2; 699 count -= 2;
689 } 700 }
690 /* Trailing byte? */ 701 /* Trailing byte? */
691 if(count) 702 if (count)
692 *target = *ptr; 703 *target = *ptr;
693} 704}
694 705
695static void dayna_memcpy_tocard(struct net_device *dev, int to, const void *from, int count) 706static void dayna_memcpy_tocard(struct net_device *dev, int to,
707 const void *from, int count)
696{ 708{
697 volatile unsigned short *ptr; 709 volatile unsigned short *ptr;
698 const unsigned char *src=from; 710 const unsigned char *src = from;
699 to<<=1; /* word, skip overhead */ 711 to <<= 1; /* word, skip overhead */
700 ptr=(unsigned short *)(dev->mem_start+to); 712 ptr = (unsigned short *)(dev->mem_start+to);
701 /* Leading byte? */ 713 /* Leading byte? */
702 if (to&2) { /* avoid a byte write (stomps on other data) */ 714 if (to & 2) { /* avoid a byte write (stomps on other data) */
703 ptr[-1] = (ptr[-1]&0xFF00)|*src++; 715 ptr[-1] = (ptr[-1]&0xFF00)|*src++;
704 ptr++; 716 ptr++;
705 count--; 717 count--;
706 } 718 }
707 while(count>=2) 719 while (count >= 2) {
708 { 720 *ptr++ = *(unsigned short *)src; /* Copy and */
709 *ptr++=*(unsigned short *)src; /* Copy and */
710 ptr++; /* skip cruft */ 721 ptr++; /* skip cruft */
711 src += 2; 722 src += 2;
712 count-=2; 723 count -= 2;
713 } 724 }
714 /* Trailing byte? */ 725 /* Trailing byte? */
715 if(count) 726 if (count) {
716 {
717 /* card doesn't like byte writes */ 727 /* card doesn't like byte writes */
718 *ptr=(*ptr&0x00FF)|(*src << 8); 728 *ptr = (*ptr & 0x00FF) | (*src << 8);
719 } 729 }
720} 730}
721 731
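The Dayna copy helpers above handle a card that exposes each 16-bit word of packet memory in the low half of a 32-bit host word, which is why card offsets are doubled ("word, skip overhead") and the pointers advance by 4 while only 2 bytes are copied. A worked example of the addressing, with an assumed card offset:

	int from = 0x0100;				/* card byte offset */
	u8 *p = (u8 *)(dev->mem_start + (from << 1));	/* host offset 0x0200 */
	/* card bytes 0x0100-0x0101 live at p[0]-p[1];
	 * card bytes 0x0102-0x0103 live at p[4]-p[5], not p[2]-p[3] */
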
@@ -738,11 +748,14 @@ static void sane_block_input(struct net_device *dev, int count,
738 if (xfer_start + count > ei_status.rmem_end) { 748 if (xfer_start + count > ei_status.rmem_end) {
739 /* We must wrap the input move. */ 749 /* We must wrap the input move. */
740 int semi_count = ei_status.rmem_end - xfer_start; 750 int semi_count = ei_status.rmem_end - xfer_start;
741 memcpy_fromio(skb->data, (char *)dev->mem_start + xfer_base, semi_count); 751 memcpy_fromio(skb->data, (char *)dev->mem_start + xfer_base,
752 semi_count);
742 count -= semi_count; 753 count -= semi_count;
743 memcpy_toio(skb->data + semi_count, (char *)ei_status.rmem_start, count); 754 memcpy_toio(skb->data + semi_count,
755 (char *)ei_status.rmem_start, count);
744 } else { 756 } else {
745 memcpy_fromio(skb->data, (char *)dev->mem_start + xfer_base, count); 757 memcpy_fromio(skb->data, (char *)dev->mem_start + xfer_base,
758 count);
746 } 759 }
747} 760}
748 761
@@ -755,16 +768,18 @@ static void sane_block_output(struct net_device *dev, int count,
755} 768}
756 769
757/* dayna block input/output */ 770/* dayna block input/output */
758static void dayna_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr, int ring_page) 771static void dayna_get_8390_hdr(struct net_device *dev,
772 struct e8390_pkt_hdr *hdr, int ring_page)
759{ 773{
760 unsigned long hdr_start = (ring_page - WD_START_PG)<<8; 774 unsigned long hdr_start = (ring_page - WD_START_PG)<<8;
761 775
762 dayna_memcpy_fromcard(dev, (void *)hdr, hdr_start, 4); 776 dayna_memcpy_fromcard(dev, hdr, hdr_start, 4);
763 /* Fix endianness */ 777 /* Fix endianness */
764 hdr->count=(hdr->count&0xFF)<<8|(hdr->count>>8); 778 hdr->count = (hdr->count & 0xFF) << 8 | (hdr->count >> 8);
765} 779}
766 780
767static void dayna_block_input(struct net_device *dev, int count, struct sk_buff *skb, int ring_offset) 781static void dayna_block_input(struct net_device *dev, int count,
782 struct sk_buff *skb, int ring_offset)
768{ 783{
769 unsigned long xfer_base = ring_offset - (WD_START_PG<<8); 784 unsigned long xfer_base = ring_offset - (WD_START_PG<<8);
770 unsigned long xfer_start = xfer_base+dev->mem_start; 785 unsigned long xfer_start = xfer_base+dev->mem_start;
@@ -772,8 +787,7 @@ static void dayna_block_input(struct net_device *dev, int count, struct sk_buff
772 /* Note the offset math is done in card memory space which is word 787 /* Note the offset math is done in card memory space which is word
773 per long onto our space. */ 788 per long onto our space. */
774 789
775 if (xfer_start + count > ei_status.rmem_end) 790 if (xfer_start + count > ei_status.rmem_end) {
776 {
777 /* We must wrap the input move. */ 791 /* We must wrap the input move. */
778 int semi_count = ei_status.rmem_end - xfer_start; 792 int semi_count = ei_status.rmem_end - xfer_start;
779 dayna_memcpy_fromcard(dev, skb->data, xfer_base, semi_count); 793 dayna_memcpy_fromcard(dev, skb->data, xfer_base, semi_count);
@@ -781,15 +795,14 @@ static void dayna_block_input(struct net_device *dev, int count, struct sk_buff
781 dayna_memcpy_fromcard(dev, skb->data + semi_count, 795 dayna_memcpy_fromcard(dev, skb->data + semi_count,
782 ei_status.rmem_start - dev->mem_start, 796 ei_status.rmem_start - dev->mem_start,
783 count); 797 count);
784 } 798 } else {
785 else
786 {
787 dayna_memcpy_fromcard(dev, skb->data, xfer_base, count); 799 dayna_memcpy_fromcard(dev, skb->data, xfer_base, count);
788 } 800 }
789} 801}
790 802
791static void dayna_block_output(struct net_device *dev, int count, const unsigned char *buf, 803static void dayna_block_output(struct net_device *dev, int count,
792 int start_page) 804 const unsigned char *buf,
805 int start_page)
793{ 806{
794 long shmem = (start_page - WD_START_PG)<<8; 807 long shmem = (start_page - WD_START_PG)<<8;
795 808
@@ -797,40 +810,39 @@ static void dayna_block_output(struct net_device *dev, int count, const unsigned
797} 810}
798 811
799/* Cabletron block I/O */ 812/* Cabletron block I/O */
800static void slow_sane_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr, 813static void slow_sane_get_8390_hdr(struct net_device *dev,
801 int ring_page) 814 struct e8390_pkt_hdr *hdr,
815 int ring_page)
802{ 816{
803 unsigned long hdr_start = (ring_page - WD_START_PG)<<8; 817 unsigned long hdr_start = (ring_page - WD_START_PG)<<8;
804 word_memcpy_fromcard((void *)hdr, (char *)dev->mem_start+hdr_start, 4); 818 word_memcpy_fromcard(hdr, (char *)dev->mem_start + hdr_start, 4);
805 /* Register endianism - fix here rather than 8390.c */ 819 /* Register endianism - fix here rather than 8390.c */
806 hdr->count = (hdr->count&0xFF)<<8|(hdr->count>>8); 820 hdr->count = (hdr->count&0xFF)<<8|(hdr->count>>8);
807} 821}
808 822
809static void slow_sane_block_input(struct net_device *dev, int count, struct sk_buff *skb, 823static void slow_sane_block_input(struct net_device *dev, int count,
810 int ring_offset) 824 struct sk_buff *skb, int ring_offset)
811{ 825{
812 unsigned long xfer_base = ring_offset - (WD_START_PG<<8); 826 unsigned long xfer_base = ring_offset - (WD_START_PG<<8);
813 unsigned long xfer_start = xfer_base+dev->mem_start; 827 unsigned long xfer_start = xfer_base+dev->mem_start;
814 828
815 if (xfer_start + count > ei_status.rmem_end) 829 if (xfer_start + count > ei_status.rmem_end) {
816 {
817 /* We must wrap the input move. */ 830 /* We must wrap the input move. */
818 int semi_count = ei_status.rmem_end - xfer_start; 831 int semi_count = ei_status.rmem_end - xfer_start;
819 word_memcpy_fromcard(skb->data, (char *)dev->mem_start + 832 word_memcpy_fromcard(skb->data,
820 xfer_base, semi_count); 833 (char *)dev->mem_start + xfer_base,
834 semi_count);
821 count -= semi_count; 835 count -= semi_count;
822 word_memcpy_fromcard(skb->data + semi_count, 836 word_memcpy_fromcard(skb->data + semi_count,
823 (char *)ei_status.rmem_start, count); 837 (char *)ei_status.rmem_start, count);
824 } 838 } else {
825 else 839 word_memcpy_fromcard(skb->data,
826 { 840 (char *)dev->mem_start + xfer_base, count);
827 word_memcpy_fromcard(skb->data, (char *)dev->mem_start +
828 xfer_base, count);
829 } 841 }
830} 842}
831 843
832static void slow_sane_block_output(struct net_device *dev, int count, const unsigned char *buf, 844static void slow_sane_block_output(struct net_device *dev, int count,
833 int start_page) 845 const unsigned char *buf, int start_page)
834{ 846{
835 long shmem = (start_page - WD_START_PG)<<8; 847 long shmem = (start_page - WD_START_PG)<<8;
836 848
@@ -843,10 +855,10 @@ static void word_memcpy_tocard(void *tp, const void *fp, int count)
843 const unsigned short *from = fp; 855 const unsigned short *from = fp;
844 856
845 count++; 857 count++;
846 count/=2; 858 count /= 2;
847 859
848 while(count--) 860 while (count--)
849 *to++=*from++; 861 *to++ = *from++;
850} 862}
851 863
852static void word_memcpy_fromcard(void *tp, const void *fp, int count) 864static void word_memcpy_fromcard(void *tp, const void *fp, int count)
@@ -855,10 +867,10 @@ static void word_memcpy_fromcard(void *tp, const void *fp, int count)
855 const volatile unsigned short *from = fp; 867 const volatile unsigned short *from = fp;
856 868
857 count++; 869 count++;
858 count/=2; 870 count /= 2;
859 871
860 while(count--) 872 while (count--)
861 *to++=*from++; 873 *to++ = *from++;
862} 874}
863 875
864 876
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index 21a9c9ab4b3..fa0dc514dba 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -418,7 +418,7 @@ static struct lock_class_key macvlan_netdev_addr_lock_key;
418#define MACVLAN_FEATURES \ 418#define MACVLAN_FEATURES \
419 (NETIF_F_SG | NETIF_F_ALL_CSUM | NETIF_F_HIGHDMA | NETIF_F_FRAGLIST | \ 419 (NETIF_F_SG | NETIF_F_ALL_CSUM | NETIF_F_HIGHDMA | NETIF_F_FRAGLIST | \
420 NETIF_F_GSO | NETIF_F_TSO | NETIF_F_UFO | NETIF_F_GSO_ROBUST | \ 420 NETIF_F_GSO | NETIF_F_TSO | NETIF_F_UFO | NETIF_F_GSO_ROBUST | \
421 NETIF_F_TSO_ECN | NETIF_F_TSO6) 421 NETIF_F_TSO_ECN | NETIF_F_TSO6 | NETIF_F_GRO)
422 422
423#define MACVLAN_STATE_MASK \ 423#define MACVLAN_STATE_MASK \
424 ((1<<__LINK_STATE_NOCARRIER) | (1<<__LINK_STATE_DORMANT)) 424 ((1<<__LINK_STATE_NOCARRIER) | (1<<__LINK_STATE_DORMANT))
diff --git a/drivers/net/meth.c b/drivers/net/meth.c
index 2af81735386..9f72cb45f4a 100644
--- a/drivers/net/meth.c
+++ b/drivers/net/meth.c
@@ -51,14 +51,11 @@
51 51
52static const char *meth_str="SGI O2 Fast Ethernet"; 52static const char *meth_str="SGI O2 Fast Ethernet";
53 53
54#define HAVE_TX_TIMEOUT
55/* The maximum time waited (in jiffies) before assuming a Tx failed. (400ms) */ 54/* The maximum time waited (in jiffies) before assuming a Tx failed. (400ms) */
56#define TX_TIMEOUT (400*HZ/1000) 55#define TX_TIMEOUT (400*HZ/1000)
57 56
58#ifdef HAVE_TX_TIMEOUT
59static int timeout = TX_TIMEOUT; 57static int timeout = TX_TIMEOUT;
60module_param(timeout, int, 0); 58module_param(timeout, int, 0);
61#endif
62 59
63/* 60/*
64 * This structure is private to each device. It is used to pass 61 * This structure is private to each device. It is used to pass
diff --git a/drivers/net/mlx4/main.c b/drivers/net/mlx4/main.c
index 3cf56d90d85..8f6e816a739 100644
--- a/drivers/net/mlx4/main.c
+++ b/drivers/net/mlx4/main.c
@@ -1271,7 +1271,7 @@ int mlx4_restart_one(struct pci_dev *pdev)
1271 return __mlx4_init_one(pdev, NULL); 1271 return __mlx4_init_one(pdev, NULL);
1272} 1272}
1273 1273
1274static struct pci_device_id mlx4_pci_table[] = { 1274static DEFINE_PCI_DEVICE_TABLE(mlx4_pci_table) = {
1275 { PCI_VDEVICE(MELLANOX, 0x6340) }, /* MT25408 "Hermon" SDR */ 1275 { PCI_VDEVICE(MELLANOX, 0x6340) }, /* MT25408 "Hermon" SDR */
1276 { PCI_VDEVICE(MELLANOX, 0x634a) }, /* MT25408 "Hermon" DDR */ 1276 { PCI_VDEVICE(MELLANOX, 0x634a) }, /* MT25408 "Hermon" DDR */
1277 { PCI_VDEVICE(MELLANOX, 0x6354) }, /* MT25408 "Hermon" QDR */ 1277 { PCI_VDEVICE(MELLANOX, 0x6354) }, /* MT25408 "Hermon" QDR */
diff --git a/drivers/net/mv643xx_eth.c b/drivers/net/mv643xx_eth.c
index af67af55efe..e24072a9a97 100644
--- a/drivers/net/mv643xx_eth.c
+++ b/drivers/net/mv643xx_eth.c
@@ -55,7 +55,6 @@
55#include <linux/types.h> 55#include <linux/types.h>
56#include <linux/inet_lro.h> 56#include <linux/inet_lro.h>
57#include <asm/system.h> 57#include <asm/system.h>
58#include <linux/list.h>
59 58
60static char mv643xx_eth_driver_name[] = "mv643xx_eth"; 59static char mv643xx_eth_driver_name[] = "mv643xx_eth";
61static char mv643xx_eth_driver_version[] = "1.4"; 60static char mv643xx_eth_driver_version[] = "1.4";
@@ -1697,7 +1696,7 @@ static u32 uc_addr_filter_mask(struct net_device *dev)
1697 return 0; 1696 return 0;
1698 1697
1699 nibbles = 1 << (dev->dev_addr[5] & 0x0f); 1698 nibbles = 1 << (dev->dev_addr[5] & 0x0f);
1700 list_for_each_entry(ha, &dev->uc.list, list) { 1699 netdev_for_each_uc_addr(ha, dev) {
1701 if (memcmp(dev->dev_addr, ha->addr, 5)) 1700 if (memcmp(dev->dev_addr, ha->addr, 5))
1702 return 0; 1701 return 0;
1703 if ((dev->dev_addr[5] ^ ha->addr[5]) & 0xf0) 1702 if ((dev->dev_addr[5] ^ ha->addr[5]) & 0xf0)
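The mv643xx_eth hunk above replaces the open-coded walk of dev->uc.list with the netdev_for_each_uc_addr() helper; at this point in the tree the helper is roughly equivalent to the code it replaces (a sketch, not the exact in-tree definition):

	#define netdev_for_each_uc_addr(ha, dev) \
		list_for_each_entry(ha, &(dev)->uc.list, list)
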
diff --git a/drivers/net/myri10ge/myri10ge.c b/drivers/net/myri10ge/myri10ge.c
index 3fcb1c356e0..c0884a9cba3 100644
--- a/drivers/net/myri10ge/myri10ge.c
+++ b/drivers/net/myri10ge/myri10ge.c
@@ -4085,7 +4085,7 @@ static void myri10ge_remove(struct pci_dev *pdev)
4085#define PCI_DEVICE_ID_MYRICOM_MYRI10GE_Z8E 0x0008 4085#define PCI_DEVICE_ID_MYRICOM_MYRI10GE_Z8E 0x0008
4086#define PCI_DEVICE_ID_MYRICOM_MYRI10GE_Z8E_9 0x0009 4086#define PCI_DEVICE_ID_MYRICOM_MYRI10GE_Z8E_9 0x0009
4087 4087
4088static struct pci_device_id myri10ge_pci_tbl[] = { 4088static DEFINE_PCI_DEVICE_TABLE(myri10ge_pci_tbl) = {
4089 {PCI_DEVICE(PCI_VENDOR_ID_MYRICOM, PCI_DEVICE_ID_MYRICOM_MYRI10GE_Z8E)}, 4089 {PCI_DEVICE(PCI_VENDOR_ID_MYRICOM, PCI_DEVICE_ID_MYRICOM_MYRI10GE_Z8E)},
4090 {PCI_DEVICE 4090 {PCI_DEVICE
4091 (PCI_VENDOR_ID_MYRICOM, PCI_DEVICE_ID_MYRICOM_MYRI10GE_Z8E_9)}, 4091 (PCI_VENDOR_ID_MYRICOM, PCI_DEVICE_ID_MYRICOM_MYRI10GE_Z8E_9)},
diff --git a/drivers/net/myri_sbus.c b/drivers/net/myri_sbus.c
index b3513ad3b70..8b431308535 100644
--- a/drivers/net/myri_sbus.c
+++ b/drivers/net/myri_sbus.c
@@ -716,10 +716,10 @@ static int myri_header(struct sk_buff *skb, struct net_device *dev,
716 pad[0] = MYRI_PAD_LEN; 716 pad[0] = MYRI_PAD_LEN;
717 pad[1] = 0xab; 717 pad[1] = 0xab;
718 718
719 /* Set the protocol type. For a packet of type ETH_P_802_3 we put the length 719 /* Set the protocol type. For a packet of type ETH_P_802_3/2 we put the
720 * in here instead. It is up to the 802.2 layer to carry protocol information. 720 * length in here instead.
721 */ 721 */
722 if (type != ETH_P_802_3) 722 if (type != ETH_P_802_3 && type != ETH_P_802_2)
723 eth->h_proto = htons(type); 723 eth->h_proto = htons(type);
724 else 724 else
725 eth->h_proto = htons(len); 725 eth->h_proto = htons(len);
diff --git a/drivers/net/natsemi.c b/drivers/net/natsemi.c
index 797fe164ce2..2d7b3bbfed0 100644
--- a/drivers/net/natsemi.c
+++ b/drivers/net/natsemi.c
@@ -247,7 +247,7 @@ static struct {
247 { "NatSemi DP8381[56]", 0, 24 }, 247 { "NatSemi DP8381[56]", 0, 24 },
248}; 248};
249 249
250static struct pci_device_id natsemi_pci_tbl[] __devinitdata = { 250static DEFINE_PCI_DEVICE_TABLE(natsemi_pci_tbl) = {
251 { PCI_VENDOR_ID_NS, 0x0020, 0x12d9, 0x000c, 0, 0, 0 }, 251 { PCI_VENDOR_ID_NS, 0x0020, 0x12d9, 0x000c, 0, 0, 0 },
252 { PCI_VENDOR_ID_NS, 0x0020, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1 }, 252 { PCI_VENDOR_ID_NS, 0x0020, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1 },
253 { } /* terminate list */ 253 { } /* terminate list */
diff --git a/drivers/net/ne2k-pci.c b/drivers/net/ne2k-pci.c
index 3fcebb70151..85aec4f1013 100644
--- a/drivers/net/ne2k-pci.c
+++ b/drivers/net/ne2k-pci.c
@@ -136,7 +136,7 @@ static struct {
136}; 136};
137 137
138 138
139static struct pci_device_id ne2k_pci_tbl[] = { 139static DEFINE_PCI_DEVICE_TABLE(ne2k_pci_tbl) = {
140 { 0x10ec, 0x8029, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_RealTek_RTL_8029 }, 140 { 0x10ec, 0x8029, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_RealTek_RTL_8029 },
141 { 0x1050, 0x0940, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_Winbond_89C940 }, 141 { 0x1050, 0x0940, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_Winbond_89C940 },
142 { 0x11f6, 0x1401, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_Compex_RL2000 }, 142 { 0x11f6, 0x1401, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_Compex_RL2000 },
diff --git a/drivers/net/netxen/Makefile b/drivers/net/netxen/Makefile
index 11d94e2434e..861a0590b1f 100644
--- a/drivers/net/netxen/Makefile
+++ b/drivers/net/netxen/Makefile
@@ -18,7 +18,7 @@
18# MA 02111-1307, USA. 18# MA 02111-1307, USA.
19# 19#
20# The full GNU General Public License is included in this distribution 20# The full GNU General Public License is included in this distribution
21# in the file called LICENSE. 21# in the file called "COPYING".
22# 22#
23# 23#
24 24
diff --git a/drivers/net/netxen/netxen_nic.h b/drivers/net/netxen/netxen_nic.h
index 9bc5bd1d538..144d2e88042 100644
--- a/drivers/net/netxen/netxen_nic.h
+++ b/drivers/net/netxen/netxen_nic.h
@@ -19,7 +19,7 @@
19 * MA 02111-1307, USA. 19 * MA 02111-1307, USA.
20 * 20 *
21 * The full GNU General Public License is included in this distribution 21 * The full GNU General Public License is included in this distribution
22 * in the file called LICENSE. 22 * in the file called "COPYING".
23 * 23 *
24 */ 24 */
25 25
@@ -420,7 +420,7 @@ struct status_desc {
420} __attribute__ ((aligned(16))); 420} __attribute__ ((aligned(16)));
421 421
422/* UNIFIED ROMIMAGE *************************/ 422/* UNIFIED ROMIMAGE *************************/
423#define NX_UNI_FW_MIN_SIZE 0x3eb000 423#define NX_UNI_FW_MIN_SIZE 0xc8000
424#define NX_UNI_DIR_SECT_PRODUCT_TBL 0x0 424#define NX_UNI_DIR_SECT_PRODUCT_TBL 0x0
425#define NX_UNI_DIR_SECT_BOOTLD 0x6 425#define NX_UNI_DIR_SECT_BOOTLD 0x6
426#define NX_UNI_DIR_SECT_FW 0x7 426#define NX_UNI_DIR_SECT_FW 0x7
@@ -1427,8 +1427,8 @@ static inline u32 netxen_tx_avail(struct nx_host_tx_ring *tx_ring)
1427 1427
1428} 1428}
1429 1429
1430int netxen_get_flash_mac_addr(struct netxen_adapter *adapter, __le64 *mac); 1430int netxen_get_flash_mac_addr(struct netxen_adapter *adapter, u64 *mac);
1431int netxen_p3_get_mac_addr(struct netxen_adapter *adapter, __le64 *mac); 1431int netxen_p3_get_mac_addr(struct netxen_adapter *adapter, u64 *mac);
1432extern void netxen_change_ringparam(struct netxen_adapter *adapter); 1432extern void netxen_change_ringparam(struct netxen_adapter *adapter);
1433extern int netxen_rom_fast_read(struct netxen_adapter *adapter, int addr, 1433extern int netxen_rom_fast_read(struct netxen_adapter *adapter, int addr,
1434 int *valp); 1434 int *valp);
diff --git a/drivers/net/netxen/netxen_nic_ctx.c b/drivers/net/netxen/netxen_nic_ctx.c
index 9cb8f687804..2a8ef5fc966 100644
--- a/drivers/net/netxen/netxen_nic_ctx.c
+++ b/drivers/net/netxen/netxen_nic_ctx.c
@@ -19,7 +19,7 @@
19 * MA 02111-1307, USA. 19 * MA 02111-1307, USA.
20 * 20 *
21 * The full GNU General Public License is included in this distribution 21 * The full GNU General Public License is included in this distribution
22 * in the file called LICENSE. 22 * in the file called "COPYING".
23 * 23 *
24 */ 24 */
25 25
diff --git a/drivers/net/netxen/netxen_nic_ethtool.c b/drivers/net/netxen/netxen_nic_ethtool.c
index 542f408333f..f8499e56cbe 100644
--- a/drivers/net/netxen/netxen_nic_ethtool.c
+++ b/drivers/net/netxen/netxen_nic_ethtool.c
@@ -19,7 +19,7 @@
19 * MA 02111-1307, USA. 19 * MA 02111-1307, USA.
20 * 20 *
21 * The full GNU General Public License is included in this distribution 21 * The full GNU General Public License is included in this distribution
22 * in the file called LICENSE. 22 * in the file called "COPYING".
23 * 23 *
24 */ 24 */
25 25
diff --git a/drivers/net/netxen/netxen_nic_hdr.h b/drivers/net/netxen/netxen_nic_hdr.h
index d138fc22927..622e4c8be93 100644
--- a/drivers/net/netxen/netxen_nic_hdr.h
+++ b/drivers/net/netxen/netxen_nic_hdr.h
@@ -19,7 +19,7 @@
19 * MA 02111-1307, USA. 19 * MA 02111-1307, USA.
20 * 20 *
21 * The full GNU General Public License is included in this distribution 21 * The full GNU General Public License is included in this distribution
22 * in the file called LICENSE. 22 * in the file called "COPYING".
23 * 23 *
24 */ 24 */
25 25
@@ -969,7 +969,8 @@ enum {
969#define NX_DEV_READY 3 969#define NX_DEV_READY 3
970#define NX_DEV_NEED_RESET 4 970#define NX_DEV_NEED_RESET 4
971#define NX_DEV_NEED_QUISCENT 5 971#define NX_DEV_NEED_QUISCENT 5
972#define NX_DEV_FAILED 6 972#define NX_DEV_NEED_AER 6
973#define NX_DEV_FAILED 7
973 974
974#define NX_RCODE_DRIVER_INFO 0x20000000 975#define NX_RCODE_DRIVER_INFO 0x20000000
975#define NX_RCODE_DRIVER_CAN_RELOAD 0x40000000 976#define NX_RCODE_DRIVER_CAN_RELOAD 0x40000000
diff --git a/drivers/net/netxen/netxen_nic_hw.c b/drivers/net/netxen/netxen_nic_hw.c
index 85e28e60ecf..dd45c7a9122 100644
--- a/drivers/net/netxen/netxen_nic_hw.c
+++ b/drivers/net/netxen/netxen_nic_hw.c
@@ -19,7 +19,7 @@
19 * MA 02111-1307, USA. 19 * MA 02111-1307, USA.
20 * 20 *
21 * The full GNU General Public License is included in this distribution 21 * The full GNU General Public License is included in this distribution
22 * in the file called LICENSE. 22 * in the file called "COPYING".
23 * 23 *
24 */ 24 */
25 25
@@ -777,17 +777,20 @@ int netxen_p3_nic_set_mac_addr(struct netxen_adapter *adapter, u8 *addr)
777int netxen_config_intr_coalesce(struct netxen_adapter *adapter) 777int netxen_config_intr_coalesce(struct netxen_adapter *adapter)
778{ 778{
779 nx_nic_req_t req; 779 nx_nic_req_t req;
780 u64 word; 780 u64 word[6];
781 int rv; 781 int rv, i;
782 782
783 memset(&req, 0, sizeof(nx_nic_req_t)); 783 memset(&req, 0, sizeof(nx_nic_req_t));
784 memset(word, 0, sizeof(word));
784 785
785 req.qhdr = cpu_to_le64(NX_HOST_REQUEST << 23); 786 req.qhdr = cpu_to_le64(NX_HOST_REQUEST << 23);
786 787
787 word = NETXEN_CONFIG_INTR_COALESCE | ((u64)adapter->portnum << 16); 788 word[0] = NETXEN_CONFIG_INTR_COALESCE | ((u64)adapter->portnum << 16);
788 req.req_hdr = cpu_to_le64(word); 789 req.req_hdr = cpu_to_le64(word[0]);
789 790
790 memcpy(&req.words[0], &adapter->coal, sizeof(adapter->coal)); 791 memcpy(&word[0], &adapter->coal, sizeof(adapter->coal));
792 for (i = 0; i < 6; i++)
793 req.words[i] = cpu_to_le64(word[i]);
791 794
792 rv = netxen_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1); 795 rv = netxen_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
793 if (rv != 0) { 796 if (rv != 0) {
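Illustrative aside, not part of the patch: the hunk above widens the on-stack scratch value to a six-word array so that every word of the coalescing parameters is converted with cpu_to_le64() before it reaches the little-endian request; previously only the header word was swapped and the payload was copied in host byte order. A minimal sketch of that staging pattern, with hypothetical names (assumes <linux/types.h>, <linux/string.h>, <asm/byteorder.h>):

    /* sketch only -- hypothetical request layout, not the netxen structures */
    struct example_req { __le64 words[6]; };

    static void example_fill(struct example_req *req,
                             const void *payload, size_t len)
    {
            u64 staging[6];
            int i;

            if (len > sizeof(staging))
                    len = sizeof(staging);

            memset(staging, 0, sizeof(staging));
            memcpy(staging, payload, len);

            /* byte-swap each word exactly once on its way into the request */
            for (i = 0; i < 6; i++)
                    req->words[i] = cpu_to_le64(staging[i]);
    }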
@@ -1033,7 +1036,7 @@ static int netxen_get_flash_block(struct netxen_adapter *adapter, int base,
1033 return 0; 1036 return 0;
1034} 1037}
1035 1038
1036int netxen_get_flash_mac_addr(struct netxen_adapter *adapter, __le64 *mac) 1039int netxen_get_flash_mac_addr(struct netxen_adapter *adapter, u64 *mac)
1037{ 1040{
1038 __le32 *pmac = (__le32 *) mac; 1041 __le32 *pmac = (__le32 *) mac;
1039 u32 offset; 1042 u32 offset;
@@ -1058,7 +1061,7 @@ int netxen_get_flash_mac_addr(struct netxen_adapter *adapter, __le64 *mac)
1058 return 0; 1061 return 0;
1059} 1062}
1060 1063
1061int netxen_p3_get_mac_addr(struct netxen_adapter *adapter, __le64 *mac) 1064int netxen_p3_get_mac_addr(struct netxen_adapter *adapter, u64 *mac)
1062{ 1065{
1063 uint32_t crbaddr, mac_hi, mac_lo; 1066 uint32_t crbaddr, mac_hi, mac_lo;
1064 int pci_func = adapter->ahw.pci_func; 1067 int pci_func = adapter->ahw.pci_func;
diff --git a/drivers/net/netxen/netxen_nic_hw.h b/drivers/net/netxen/netxen_nic_hw.h
index 3fd1dcb3583..e2c5b6f2df0 100644
--- a/drivers/net/netxen/netxen_nic_hw.h
+++ b/drivers/net/netxen/netxen_nic_hw.h
@@ -19,7 +19,7 @@
19 * MA 02111-1307, USA. 19 * MA 02111-1307, USA.
20 * 20 *
21 * The full GNU General Public License is included in this distribution 21 * The full GNU General Public License is included in this distribution
22 * in the file called LICENSE. 22 * in the file called "COPYING".
23 * 23 *
24 */ 24 */
25 25
diff --git a/drivers/net/netxen/netxen_nic_init.c b/drivers/net/netxen/netxen_nic_init.c
index 64cff68d372..1c63610ead4 100644
--- a/drivers/net/netxen/netxen_nic_init.c
+++ b/drivers/net/netxen/netxen_nic_init.c
@@ -19,7 +19,7 @@
19 * MA 02111-1307, USA. 19 * MA 02111-1307, USA.
20 * 20 *
21 * The full GNU General Public License is included in this distribution 21 * The full GNU General Public License is included in this distribution
22 * in the file called LICENSE. 22 * in the file called "COPYING".
23 * 23 *
24 */ 24 */
25 25
@@ -780,6 +780,9 @@ netxen_need_fw_reset(struct netxen_adapter *adapter)
780 if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) 780 if (NX_IS_REVISION_P2(adapter->ahw.revision_id))
781 return 1; 781 return 1;
782 782
783 if (adapter->need_fw_reset)
784 return 1;
785
783 /* last attempt had failed */ 786 /* last attempt had failed */
784 if (NXRD32(adapter, CRB_CMDPEG_STATE) == PHAN_INITIALIZE_FAILED) 787 if (NXRD32(adapter, CRB_CMDPEG_STATE) == PHAN_INITIALIZE_FAILED)
785 return 1; 788 return 1;
diff --git a/drivers/net/netxen/netxen_nic_main.c b/drivers/net/netxen/netxen_nic_main.c
index 24279e6e55f..08780ef1c1f 100644
--- a/drivers/net/netxen/netxen_nic_main.c
+++ b/drivers/net/netxen/netxen_nic_main.c
@@ -19,7 +19,7 @@
19 * MA 02111-1307, USA. 19 * MA 02111-1307, USA.
20 * 20 *
21 * The full GNU General Public License is included in this distribution 21 * The full GNU General Public License is included in this distribution
22 * in the file called LICENSE. 22 * in the file called "COPYING".
23 * 23 *
24 */ 24 */
25 25
@@ -35,6 +35,7 @@
35#include <linux/ipv6.h> 35#include <linux/ipv6.h>
36#include <linux/inetdevice.h> 36#include <linux/inetdevice.h>
37#include <linux/sysfs.h> 37#include <linux/sysfs.h>
38#include <linux/aer.h>
38 39
39MODULE_DESCRIPTION("QLogic/NetXen (1/10) GbE Converged Ethernet Driver"); 40MODULE_DESCRIPTION("QLogic/NetXen (1/10) GbE Converged Ethernet Driver");
40MODULE_LICENSE("GPL"); 41MODULE_LICENSE("GPL");
@@ -84,6 +85,7 @@ static void netxen_remove_sysfs_entries(struct netxen_adapter *adapter);
84static void netxen_create_diag_entries(struct netxen_adapter *adapter); 85static void netxen_create_diag_entries(struct netxen_adapter *adapter);
85static void netxen_remove_diag_entries(struct netxen_adapter *adapter); 86static void netxen_remove_diag_entries(struct netxen_adapter *adapter);
86 87
88static int nx_dev_request_aer(struct netxen_adapter *adapter);
87static int nx_decr_dev_ref_cnt(struct netxen_adapter *adapter); 89static int nx_decr_dev_ref_cnt(struct netxen_adapter *adapter);
88static int netxen_can_start_firmware(struct netxen_adapter *adapter); 90static int netxen_can_start_firmware(struct netxen_adapter *adapter);
89 91
@@ -98,7 +100,7 @@ static void netxen_config_indev_addr(struct net_device *dev, unsigned long);
98 {PCI_DEVICE(PCI_VENDOR_ID_NETXEN, (device)), \ 100 {PCI_DEVICE(PCI_VENDOR_ID_NETXEN, (device)), \
99 .class = PCI_CLASS_NETWORK_ETHERNET << 8, .class_mask = ~0} 101 .class = PCI_CLASS_NETWORK_ETHERNET << 8, .class_mask = ~0}
100 102
101static struct pci_device_id netxen_pci_tbl[] __devinitdata = { 103static DEFINE_PCI_DEVICE_TABLE(netxen_pci_tbl) = {
102 ENTRY(PCI_DEVICE_ID_NX2031_10GXSR), 104 ENTRY(PCI_DEVICE_ID_NX2031_10GXSR),
103 ENTRY(PCI_DEVICE_ID_NX2031_10GCX4), 105 ENTRY(PCI_DEVICE_ID_NX2031_10GCX4),
104 ENTRY(PCI_DEVICE_ID_NX2031_4GCU), 106 ENTRY(PCI_DEVICE_ID_NX2031_4GCU),
@@ -430,7 +432,7 @@ netxen_read_mac_addr(struct netxen_adapter *adapter)
430{ 432{
431 int i; 433 int i;
432 unsigned char *p; 434 unsigned char *p;
433 __le64 mac_addr; 435 u64 mac_addr;
434 struct net_device *netdev = adapter->netdev; 436 struct net_device *netdev = adapter->netdev;
435 struct pci_dev *pdev = adapter->pdev; 437 struct pci_dev *pdev = adapter->pdev;
436 438
@@ -1262,6 +1264,9 @@ netxen_nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1262 if ((err = pci_request_regions(pdev, netxen_nic_driver_name))) 1264 if ((err = pci_request_regions(pdev, netxen_nic_driver_name)))
1263 goto err_out_disable_pdev; 1265 goto err_out_disable_pdev;
1264 1266
1267 if (NX_IS_REVISION_P3(pdev->revision))
1268 pci_enable_pcie_error_reporting(pdev);
1269
1265 pci_set_master(pdev); 1270 pci_set_master(pdev);
1266 1271
1267 netdev = alloc_etherdev(sizeof(struct netxen_adapter)); 1272 netdev = alloc_etherdev(sizeof(struct netxen_adapter));
@@ -1409,17 +1414,19 @@ static void __devexit netxen_nic_remove(struct pci_dev *pdev)
1409 1414
1410 netxen_release_firmware(adapter); 1415 netxen_release_firmware(adapter);
1411 1416
1417 if (NX_IS_REVISION_P3(pdev->revision))
1418 pci_disable_pcie_error_reporting(pdev);
1419
1412 pci_release_regions(pdev); 1420 pci_release_regions(pdev);
1413 pci_disable_device(pdev); 1421 pci_disable_device(pdev);
1414 pci_set_drvdata(pdev, NULL); 1422 pci_set_drvdata(pdev, NULL);
1415 1423
1416 free_netdev(netdev); 1424 free_netdev(netdev);
1417} 1425}
1418static int __netxen_nic_shutdown(struct pci_dev *pdev) 1426
1427static void netxen_nic_detach_func(struct netxen_adapter *adapter)
1419{ 1428{
1420 struct netxen_adapter *adapter = pci_get_drvdata(pdev);
1421 struct net_device *netdev = adapter->netdev; 1429 struct net_device *netdev = adapter->netdev;
1422 int retval;
1423 1430
1424 netif_device_detach(netdev); 1431 netif_device_detach(netdev);
1425 1432
@@ -1438,53 +1445,22 @@ static int __netxen_nic_shutdown(struct pci_dev *pdev)
1438 nx_decr_dev_ref_cnt(adapter); 1445 nx_decr_dev_ref_cnt(adapter);
1439 1446
1440 clear_bit(__NX_RESETTING, &adapter->state); 1447 clear_bit(__NX_RESETTING, &adapter->state);
1441
1442 retval = pci_save_state(pdev);
1443 if (retval)
1444 return retval;
1445
1446 if (netxen_nic_wol_supported(adapter)) {
1447 pci_enable_wake(pdev, PCI_D3cold, 1);
1448 pci_enable_wake(pdev, PCI_D3hot, 1);
1449 }
1450
1451 pci_disable_device(pdev);
1452
1453 return 0;
1454} 1448}
1455static void netxen_nic_shutdown(struct pci_dev *pdev)
1456{
1457 if (__netxen_nic_shutdown(pdev))
1458 return;
1459}
1460#ifdef CONFIG_PM
1461static int
1462netxen_nic_suspend(struct pci_dev *pdev, pm_message_t state)
1463{
1464 int retval;
1465
1466 retval = __netxen_nic_shutdown(pdev);
1467 if (retval)
1468 return retval;
1469 1449
1470 pci_set_power_state(pdev, pci_choose_state(pdev, state)); 1450static int netxen_nic_attach_func(struct pci_dev *pdev)
1471 return 0;
1472}
1473
1474static int
1475netxen_nic_resume(struct pci_dev *pdev)
1476{ 1451{
1477 struct netxen_adapter *adapter = pci_get_drvdata(pdev); 1452 struct netxen_adapter *adapter = pci_get_drvdata(pdev);
1478 struct net_device *netdev = adapter->netdev; 1453 struct net_device *netdev = adapter->netdev;
1479 int err; 1454 int err;
1480 1455
1481 pci_set_power_state(pdev, PCI_D0);
1482 pci_restore_state(pdev);
1483
1484 err = pci_enable_device(pdev); 1456 err = pci_enable_device(pdev);
1485 if (err) 1457 if (err)
1486 return err; 1458 return err;
1487 1459
1460 pci_set_power_state(pdev, PCI_D0);
1461 pci_set_master(pdev);
1462 pci_restore_state(pdev);
1463
1488 adapter->ahw.crb_win = -1; 1464 adapter->ahw.crb_win = -1;
1489 adapter->ahw.ocm_win = -1; 1465 adapter->ahw.ocm_win = -1;
1490 1466
@@ -1503,11 +1479,10 @@ netxen_nic_resume(struct pci_dev *pdev)
1503 if (err) 1479 if (err)
1504 goto err_out_detach; 1480 goto err_out_detach;
1505 1481
1506 netif_device_attach(netdev);
1507
1508 netxen_config_indev_addr(netdev, NETDEV_UP); 1482 netxen_config_indev_addr(netdev, NETDEV_UP);
1509 } 1483 }
1510 1484
1485 netif_device_attach(netdev);
1511 netxen_schedule_work(adapter, netxen_fw_poll_work, FW_POLL_DELAY); 1486 netxen_schedule_work(adapter, netxen_fw_poll_work, FW_POLL_DELAY);
1512 return 0; 1487 return 0;
1513 1488
@@ -1517,6 +1492,85 @@ err_out:
1517 nx_decr_dev_ref_cnt(adapter); 1492 nx_decr_dev_ref_cnt(adapter);
1518 return err; 1493 return err;
1519} 1494}
1495
1496static pci_ers_result_t netxen_io_error_detected(struct pci_dev *pdev,
1497 pci_channel_state_t state)
1498{
1499 struct netxen_adapter *adapter = pci_get_drvdata(pdev);
1500
1501 if (state == pci_channel_io_perm_failure)
1502 return PCI_ERS_RESULT_DISCONNECT;
1503
1504 if (nx_dev_request_aer(adapter))
1505 return PCI_ERS_RESULT_RECOVERED;
1506
1507 netxen_nic_detach_func(adapter);
1508
1509 pci_disable_device(pdev);
1510
1511 return PCI_ERS_RESULT_NEED_RESET;
1512}
1513
1514static pci_ers_result_t netxen_io_slot_reset(struct pci_dev *pdev)
1515{
1516 int err = 0;
1517
1518 err = netxen_nic_attach_func(pdev);
1519
1520 return err ? PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_RECOVERED;
1521}
1522
1523static void netxen_io_resume(struct pci_dev *pdev)
1524{
1525 pci_cleanup_aer_uncorrect_error_status(pdev);
1526}
1527
1528static void netxen_nic_shutdown(struct pci_dev *pdev)
1529{
1530 struct netxen_adapter *adapter = pci_get_drvdata(pdev);
1531
1532 netxen_nic_detach_func(adapter);
1533
1534 if (pci_save_state(pdev))
1535 return;
1536
1537 if (netxen_nic_wol_supported(adapter)) {
1538 pci_enable_wake(pdev, PCI_D3cold, 1);
1539 pci_enable_wake(pdev, PCI_D3hot, 1);
1540 }
1541
1542 pci_disable_device(pdev);
1543}
1544
1545#ifdef CONFIG_PM
1546static int
1547netxen_nic_suspend(struct pci_dev *pdev, pm_message_t state)
1548{
1549 struct netxen_adapter *adapter = pci_get_drvdata(pdev);
1550 int retval;
1551
1552 netxen_nic_detach_func(adapter);
1553
1554 retval = pci_save_state(pdev);
1555 if (retval)
1556 return retval;
1557
1558 if (netxen_nic_wol_supported(adapter)) {
1559 pci_enable_wake(pdev, PCI_D3cold, 1);
1560 pci_enable_wake(pdev, PCI_D3hot, 1);
1561 }
1562
1563 pci_disable_device(pdev);
1564 pci_set_power_state(pdev, pci_choose_state(pdev, state));
1565
1566 return 0;
1567}
1568
1569static int
1570netxen_nic_resume(struct pci_dev *pdev)
1571{
1572 return netxen_nic_attach_func(pdev);
1573}
1520#endif 1574#endif
1521 1575
1522static int netxen_nic_open(struct net_device *netdev) 1576static int netxen_nic_open(struct net_device *netdev)
@@ -2104,20 +2158,49 @@ nx_decr_dev_ref_cnt(struct netxen_adapter *adapter)
2104 return count; 2158 return count;
2105} 2159}
2106 2160
2107static void 2161static int
2162nx_dev_request_aer(struct netxen_adapter *adapter)
2163{
2164 u32 state;
2165 int ret = -EINVAL;
2166
2167 if (netxen_api_lock(adapter))
2168 return ret;
2169
2170 state = NXRD32(adapter, NX_CRB_DEV_STATE);
2171
2172 if (state == NX_DEV_NEED_AER)
2173 ret = 0;
2174 else if (state == NX_DEV_READY) {
2175 NXWR32(adapter, NX_CRB_DEV_STATE, NX_DEV_NEED_AER);
2176 ret = 0;
2177 }
2178
2179 netxen_api_unlock(adapter);
2180 return ret;
2181}
2182
2183static int
2108nx_dev_request_reset(struct netxen_adapter *adapter) 2184nx_dev_request_reset(struct netxen_adapter *adapter)
2109{ 2185{
2110 u32 state; 2186 u32 state;
2187 int ret = -EINVAL;
2111 2188
2112 if (netxen_api_lock(adapter)) 2189 if (netxen_api_lock(adapter))
2113 return; 2190 return ret;
2114 2191
2115 state = NXRD32(adapter, NX_CRB_DEV_STATE); 2192 state = NXRD32(adapter, NX_CRB_DEV_STATE);
2116 2193
2117 if (state != NX_DEV_INITALIZING) 2194 if (state == NX_DEV_NEED_RESET)
2195 ret = 0;
2196 else if (state != NX_DEV_INITALIZING && state != NX_DEV_NEED_AER) {
2118 NXWR32(adapter, NX_CRB_DEV_STATE, NX_DEV_NEED_RESET); 2197 NXWR32(adapter, NX_CRB_DEV_STATE, NX_DEV_NEED_RESET);
2198 ret = 0;
2199 }
2119 2200
2120 netxen_api_unlock(adapter); 2201 netxen_api_unlock(adapter);
2202
2203 return ret;
2121} 2204}
2122 2205
2123static int 2206static int
@@ -2271,17 +2354,29 @@ netxen_check_health(struct netxen_adapter *adapter)
2271 u32 state, heartbit; 2354 u32 state, heartbit;
2272 struct net_device *netdev = adapter->netdev; 2355 struct net_device *netdev = adapter->netdev;
2273 2356
2357 state = NXRD32(adapter, NX_CRB_DEV_STATE);
2358 if (state == NX_DEV_NEED_AER)
2359 return 0;
2360
2274 if (netxen_nic_check_temp(adapter)) 2361 if (netxen_nic_check_temp(adapter))
2275 goto detach; 2362 goto detach;
2276 2363
2277 if (adapter->need_fw_reset) { 2364 if (adapter->need_fw_reset) {
2278 nx_dev_request_reset(adapter); 2365 if (nx_dev_request_reset(adapter))
2366 return 0;
2279 goto detach; 2367 goto detach;
2280 } 2368 }
2281 2369
2282 state = NXRD32(adapter, NX_CRB_DEV_STATE); 2370 /* NX_DEV_NEED_RESET, this state can be marked in two cases
2283 if (state == NX_DEV_NEED_RESET) 2371 * 1. Tx timeout 2. Fw hang
2284 goto detach; 2372 * Send request to destroy context in case of tx timeout only
 2373 * and isn't required in case of Fw hang
2374 */
2375 if (state == NX_DEV_NEED_RESET) {
2376 adapter->need_fw_reset = 1;
2377 if (NX_IS_REVISION_P2(adapter->ahw.revision_id))
2378 goto detach;
2379 }
2285 2380
2286 if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) 2381 if (NX_IS_REVISION_P2(adapter->ahw.revision_id))
2287 return 0; 2382 return 0;
@@ -2290,12 +2385,17 @@ netxen_check_health(struct netxen_adapter *adapter)
2290 if (heartbit != adapter->heartbit) { 2385 if (heartbit != adapter->heartbit) {
2291 adapter->heartbit = heartbit; 2386 adapter->heartbit = heartbit;
2292 adapter->fw_fail_cnt = 0; 2387 adapter->fw_fail_cnt = 0;
2388 if (adapter->need_fw_reset)
2389 goto detach;
2293 return 0; 2390 return 0;
2294 } 2391 }
2295 2392
2296 if (++adapter->fw_fail_cnt < FW_FAIL_THRESH) 2393 if (++adapter->fw_fail_cnt < FW_FAIL_THRESH)
2297 return 0; 2394 return 0;
2298 2395
2396 if (nx_dev_request_reset(adapter))
2397 return 0;
2398
2299 clear_bit(__NX_FW_ATTACHED, &adapter->state); 2399 clear_bit(__NX_FW_ATTACHED, &adapter->state);
2300 2400
2301 dev_info(&netdev->dev, "firmware hang detected\n"); 2401 dev_info(&netdev->dev, "firmware hang detected\n");
@@ -2498,7 +2598,7 @@ netxen_sysfs_read_mem(struct kobject *kobj, struct bin_attribute *attr,
2498 return size; 2598 return size;
2499} 2599}
2500 2600
2501ssize_t netxen_sysfs_write_mem(struct kobject *kobj, 2601static ssize_t netxen_sysfs_write_mem(struct kobject *kobj,
2502 struct bin_attribute *attr, char *buf, 2602 struct bin_attribute *attr, char *buf,
2503 loff_t offset, size_t size) 2603 loff_t offset, size_t size)
2504{ 2604{
@@ -2725,6 +2825,12 @@ netxen_config_indev_addr(struct net_device *dev, unsigned long event)
2725{ } 2825{ }
2726#endif 2826#endif
2727 2827
2828static struct pci_error_handlers netxen_err_handler = {
2829 .error_detected = netxen_io_error_detected,
2830 .slot_reset = netxen_io_slot_reset,
2831 .resume = netxen_io_resume,
2832};
2833
2728static struct pci_driver netxen_driver = { 2834static struct pci_driver netxen_driver = {
2729 .name = netxen_nic_driver_name, 2835 .name = netxen_nic_driver_name,
2730 .id_table = netxen_pci_tbl, 2836 .id_table = netxen_pci_tbl,
@@ -2734,7 +2840,8 @@ static struct pci_driver netxen_driver = {
2734 .suspend = netxen_nic_suspend, 2840 .suspend = netxen_nic_suspend,
2735 .resume = netxen_nic_resume, 2841 .resume = netxen_nic_resume,
2736#endif 2842#endif
2737 .shutdown = netxen_nic_shutdown 2843 .shutdown = netxen_nic_shutdown,
2844 .err_handler = &netxen_err_handler
2738}; 2845};
2739 2846
2740static int __init netxen_init_module(void) 2847static int __init netxen_init_module(void)
diff --git a/drivers/net/niu.c b/drivers/net/niu.c
index 2aed2b382c4..af9a8647c7e 100644
--- a/drivers/net/niu.c
+++ b/drivers/net/niu.c
@@ -58,7 +58,7 @@ static void writeq(u64 val, void __iomem *reg)
58} 58}
59#endif 59#endif
60 60
61static struct pci_device_id niu_pci_tbl[] = { 61static DEFINE_PCI_DEVICE_TABLE(niu_pci_tbl) = {
62 {PCI_DEVICE(PCI_VENDOR_ID_SUN, 0xabcd)}, 62 {PCI_DEVICE(PCI_VENDOR_ID_SUN, 0xabcd)},
63 {} 63 {}
64}; 64};
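Illustrative aside, not part of the patch: the DEFINE_PCI_DEVICE_TABLE conversions repeated across this series (netxen, niu, ns83820, pasemi_mac, pci-skeleton, pcnet32, qla3xxx) make the ID tables const; in kernels of this vintage the macro expands to roughly the following, which is also why the explicit __devinitdata annotations on tables such as netxen_pci_tbl and ql3xxx_pci_tbl disappear with the conversion:

    /* include/linux/pci.h, approximate expansion */
    #define DEFINE_PCI_DEVICE_TABLE(_table) \
            const struct pci_device_id _table[] __devinitconst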
@@ -6372,7 +6372,7 @@ static void niu_set_rx_mode(struct net_device *dev)
6372 if ((dev->flags & IFF_ALLMULTI) || (dev->mc_count > 0)) 6372 if ((dev->flags & IFF_ALLMULTI) || (dev->mc_count > 0))
6373 np->flags |= NIU_FLAGS_MCAST; 6373 np->flags |= NIU_FLAGS_MCAST;
6374 6374
6375 alt_cnt = dev->uc.count; 6375 alt_cnt = netdev_uc_count(dev);
6376 if (alt_cnt > niu_num_alt_addr(np)) { 6376 if (alt_cnt > niu_num_alt_addr(np)) {
6377 alt_cnt = 0; 6377 alt_cnt = 0;
6378 np->flags |= NIU_FLAGS_PROMISC; 6378 np->flags |= NIU_FLAGS_PROMISC;
@@ -6381,7 +6381,7 @@ static void niu_set_rx_mode(struct net_device *dev)
6381 if (alt_cnt) { 6381 if (alt_cnt) {
6382 int index = 0; 6382 int index = 0;
6383 6383
6384 list_for_each_entry(ha, &dev->uc.list, list) { 6384 netdev_for_each_uc_addr(ha, dev) {
6385 err = niu_set_alt_mac(np, index, ha->addr); 6385 err = niu_set_alt_mac(np, index, ha->addr);
6386 if (err) 6386 if (err)
6387 printk(KERN_WARNING PFX "%s: Error %d " 6387 printk(KERN_WARNING PFX "%s: Error %d "
diff --git a/drivers/net/ns83820.c b/drivers/net/ns83820.c
index 1f6327d4153..a3b6aa0f375 100644
--- a/drivers/net/ns83820.c
+++ b/drivers/net/ns83820.c
@@ -2292,7 +2292,7 @@ static void __devexit ns83820_remove_one(struct pci_dev *pci_dev)
2292 pci_set_drvdata(pci_dev, NULL); 2292 pci_set_drvdata(pci_dev, NULL);
2293} 2293}
2294 2294
2295static struct pci_device_id ns83820_pci_tbl[] = { 2295static DEFINE_PCI_DEVICE_TABLE(ns83820_pci_tbl) = {
2296 { 0x100b, 0x0022, PCI_ANY_ID, PCI_ANY_ID, 0, .driver_data = 0, }, 2296 { 0x100b, 0x0022, PCI_ANY_ID, PCI_ANY_ID, 0, .driver_data = 0, },
2297 { 0, }, 2297 { 0, },
2298}; 2298};
diff --git a/drivers/net/octeon/octeon_mgmt.c b/drivers/net/octeon/octeon_mgmt.c
index 050538bf155..6fd8789ef48 100644
--- a/drivers/net/octeon/octeon_mgmt.c
+++ b/drivers/net/octeon/octeon_mgmt.c
@@ -1119,11 +1119,8 @@ static int __init octeon_mgmt_probe(struct platform_device *pdev)
1119 1119
1120 if (p->port >= octeon_bootinfo->mac_addr_count) 1120 if (p->port >= octeon_bootinfo->mac_addr_count)
1121 dev_err(&pdev->dev, 1121 dev_err(&pdev->dev,
1122 "Error %s: Using MAC outside of the assigned range: " 1122 "Error %s: Using MAC outside of the assigned range: %pM\n",
1123 "%02x:%02x:%02x:%02x:%02x:%02x\n", netdev->name, 1123 netdev->name, netdev->dev_addr);
1124 netdev->dev_addr[0], netdev->dev_addr[1],
1125 netdev->dev_addr[2], netdev->dev_addr[3],
1126 netdev->dev_addr[4], netdev->dev_addr[5]);
1127 1124
1128 if (register_netdev(netdev)) 1125 if (register_netdev(netdev))
1129 goto err; 1126 goto err;
diff --git a/drivers/net/pasemi_mac.c b/drivers/net/pasemi_mac.c
index 1673eb045e1..d44d4a208bb 100644
--- a/drivers/net/pasemi_mac.c
+++ b/drivers/net/pasemi_mac.c
@@ -1875,7 +1875,7 @@ static void __devexit pasemi_mac_remove(struct pci_dev *pdev)
1875 free_netdev(netdev); 1875 free_netdev(netdev);
1876} 1876}
1877 1877
1878static struct pci_device_id pasemi_mac_pci_tbl[] = { 1878static DEFINE_PCI_DEVICE_TABLE(pasemi_mac_pci_tbl) = {
1879 { PCI_DEVICE(PCI_VENDOR_ID_PASEMI, 0xa005) }, 1879 { PCI_DEVICE(PCI_VENDOR_ID_PASEMI, 0xa005) },
1880 { PCI_DEVICE(PCI_VENDOR_ID_PASEMI, 0xa006) }, 1880 { PCI_DEVICE(PCI_VENDOR_ID_PASEMI, 0xa006) },
1881 { }, 1881 { },
diff --git a/drivers/net/pci-skeleton.c b/drivers/net/pci-skeleton.c
index 480af402aff..20273832bfc 100644
--- a/drivers/net/pci-skeleton.c
+++ b/drivers/net/pci-skeleton.c
@@ -211,7 +211,7 @@ static struct {
211}; 211};
212 212
213 213
214static struct pci_device_id netdrv_pci_tbl[] = { 214static DEFINE_PCI_DEVICE_TABLE(netdrv_pci_tbl) = {
215 {0x10ec, 0x8139, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RTL8139 }, 215 {0x10ec, 0x8139, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RTL8139 },
216 {0x10ec, 0x8138, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NETDRV_CB }, 216 {0x10ec, 0x8138, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NETDRV_CB },
217 {0x1113, 0x1211, PCI_ANY_ID, PCI_ANY_ID, 0, 0, SMC1211TX }, 217 {0x1113, 0x1211, PCI_ANY_ID, PCI_ANY_ID, 0, 0, SMC1211TX },
diff --git a/drivers/net/pcmcia/axnet_cs.c b/drivers/net/pcmcia/axnet_cs.c
index d431b59e7d1..2ee57bd52a0 100644
--- a/drivers/net/pcmcia/axnet_cs.c
+++ b/drivers/net/pcmcia/axnet_cs.c
@@ -1065,14 +1065,11 @@ static netdev_tx_t axnet_start_xmit(struct sk_buff *skb,
1065 1065
1066 spin_lock_irqsave(&ei_local->page_lock, flags); 1066 spin_lock_irqsave(&ei_local->page_lock, flags);
1067 outb_p(0x00, e8390_base + EN0_IMR); 1067 outb_p(0x00, e8390_base + EN0_IMR);
1068 spin_unlock_irqrestore(&ei_local->page_lock, flags);
1069 1068
1070 /* 1069 /*
1071 * Slow phase with lock held. 1070 * Slow phase with lock held.
1072 */ 1071 */
1073 1072
1074 spin_lock_irqsave(&ei_local->page_lock, flags);
1075
1076 ei_local->irqlock = 1; 1073 ei_local->irqlock = 1;
1077 1074
1078 send_length = max(length, ETH_ZLEN); 1075 send_length = max(length, ETH_ZLEN);
diff --git a/drivers/net/pcnet32.c b/drivers/net/pcnet32.c
index e154677ff70..0dc7ff896ee 100644
--- a/drivers/net/pcnet32.c
+++ b/drivers/net/pcnet32.c
@@ -59,7 +59,7 @@ static const char *const version =
59/* 59/*
60 * PCI device identifiers for "new style" Linux PCI Device Drivers 60 * PCI device identifiers for "new style" Linux PCI Device Drivers
61 */ 61 */
62static struct pci_device_id pcnet32_pci_tbl[] = { 62static DEFINE_PCI_DEVICE_TABLE(pcnet32_pci_tbl) = {
63 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_LANCE_HOME), }, 63 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_LANCE_HOME), },
64 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_LANCE), }, 64 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_LANCE), },
65 65
diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c
index 6f69b9ba0df..65ed385c2ce 100644
--- a/drivers/net/phy/marvell.c
+++ b/drivers/net/phy/marvell.c
@@ -63,6 +63,7 @@
63#define MII_M1111_HWCFG_MODE_COPPER_RGMII 0xb 63#define MII_M1111_HWCFG_MODE_COPPER_RGMII 0xb
64#define MII_M1111_HWCFG_MODE_FIBER_RGMII 0x3 64#define MII_M1111_HWCFG_MODE_FIBER_RGMII 0x3
65#define MII_M1111_HWCFG_MODE_SGMII_NO_CLK 0x4 65#define MII_M1111_HWCFG_MODE_SGMII_NO_CLK 0x4
66#define MII_M1111_HWCFG_MODE_COPPER_RTBI 0x9
66#define MII_M1111_HWCFG_FIBER_COPPER_AUTO 0x8000 67#define MII_M1111_HWCFG_FIBER_COPPER_AUTO 0x8000
67#define MII_M1111_HWCFG_FIBER_COPPER_RES 0x2000 68#define MII_M1111_HWCFG_FIBER_COPPER_RES 0x2000
68 69
@@ -269,6 +270,43 @@ static int m88e1111_config_init(struct phy_device *phydev)
269 return err; 270 return err;
270 } 271 }
271 272
273 if (phydev->interface == PHY_INTERFACE_MODE_RTBI) {
274 temp = phy_read(phydev, MII_M1111_PHY_EXT_CR);
275 if (temp < 0)
276 return temp;
277 temp |= (MII_M1111_RX_DELAY | MII_M1111_TX_DELAY);
278 err = phy_write(phydev, MII_M1111_PHY_EXT_CR, temp);
279 if (err < 0)
280 return err;
281
282 temp = phy_read(phydev, MII_M1111_PHY_EXT_SR);
283 if (temp < 0)
284 return temp;
285 temp &= ~(MII_M1111_HWCFG_MODE_MASK | MII_M1111_HWCFG_FIBER_COPPER_RES);
286 temp |= 0x7 | MII_M1111_HWCFG_FIBER_COPPER_AUTO;
287 err = phy_write(phydev, MII_M1111_PHY_EXT_SR, temp);
288 if (err < 0)
289 return err;
290
291 /* soft reset */
292 err = phy_write(phydev, MII_BMCR, BMCR_RESET);
293 if (err < 0)
294 return err;
295 do
296 temp = phy_read(phydev, MII_BMCR);
297 while (temp & BMCR_RESET);
298
299 temp = phy_read(phydev, MII_M1111_PHY_EXT_SR);
300 if (temp < 0)
301 return temp;
302 temp &= ~(MII_M1111_HWCFG_MODE_MASK | MII_M1111_HWCFG_FIBER_COPPER_RES);
303 temp |= MII_M1111_HWCFG_MODE_COPPER_RTBI | MII_M1111_HWCFG_FIBER_COPPER_AUTO;
304 err = phy_write(phydev, MII_M1111_PHY_EXT_SR, temp);
305 if (err < 0)
306 return err;
307 }
308
309
272 err = phy_write(phydev, MII_BMCR, BMCR_RESET); 310 err = phy_write(phydev, MII_BMCR, BMCR_RESET);
273 if (err < 0) 311 if (err < 0)
274 return err; 312 return err;
diff --git a/drivers/net/phy/smsc.c b/drivers/net/phy/smsc.c
index 5123bb954dd..ed2644a5750 100644
--- a/drivers/net/phy/smsc.c
+++ b/drivers/net/phy/smsc.c
@@ -25,6 +25,7 @@
25 25
26#define MII_LAN83C185_ISF 29 /* Interrupt Source Flags */ 26#define MII_LAN83C185_ISF 29 /* Interrupt Source Flags */
27#define MII_LAN83C185_IM 30 /* Interrupt Mask */ 27#define MII_LAN83C185_IM 30 /* Interrupt Mask */
28#define MII_LAN83C185_CTRL_STATUS 17 /* Mode/Status Register */
28 29
29#define MII_LAN83C185_ISF_INT1 (1<<1) /* Auto-Negotiation Page Received */ 30#define MII_LAN83C185_ISF_INT1 (1<<1) /* Auto-Negotiation Page Received */
30#define MII_LAN83C185_ISF_INT2 (1<<2) /* Parallel Detection Fault */ 31#define MII_LAN83C185_ISF_INT2 (1<<2) /* Parallel Detection Fault */
@@ -37,8 +38,10 @@
37#define MII_LAN83C185_ISF_INT_ALL (0x0e) 38#define MII_LAN83C185_ISF_INT_ALL (0x0e)
38 39
39#define MII_LAN83C185_ISF_INT_PHYLIB_EVENTS \ 40#define MII_LAN83C185_ISF_INT_PHYLIB_EVENTS \
40 (MII_LAN83C185_ISF_INT6 | MII_LAN83C185_ISF_INT4) 41 (MII_LAN83C185_ISF_INT6 | MII_LAN83C185_ISF_INT4 | \
42 MII_LAN83C185_ISF_INT7)
41 43
44#define MII_LAN83C185_EDPWRDOWN (1 << 13) /* EDPWRDOWN */
42 45
43static int smsc_phy_config_intr(struct phy_device *phydev) 46static int smsc_phy_config_intr(struct phy_device *phydev)
44{ 47{
@@ -59,9 +62,23 @@ static int smsc_phy_ack_interrupt(struct phy_device *phydev)
59 62
60static int smsc_phy_config_init(struct phy_device *phydev) 63static int smsc_phy_config_init(struct phy_device *phydev)
61{ 64{
65 int rc = phy_read(phydev, MII_LAN83C185_CTRL_STATUS);
66 if (rc < 0)
67 return rc;
68
 69 /* Enable energy detect mode for these SMSC transceivers */
70 rc = phy_write(phydev, MII_LAN83C185_CTRL_STATUS,
71 rc | MII_LAN83C185_EDPWRDOWN);
72 if (rc < 0)
73 return rc;
74
62 return smsc_phy_ack_interrupt (phydev); 75 return smsc_phy_ack_interrupt (phydev);
63} 76}
64 77
78static int lan911x_config_init(struct phy_device *phydev)
79{
80 return smsc_phy_ack_interrupt(phydev);
81}
65 82
66static struct phy_driver lan83c185_driver = { 83static struct phy_driver lan83c185_driver = {
67 .phy_id = 0x0007c0a0, /* OUI=0x00800f, Model#=0x0a */ 84 .phy_id = 0x0007c0a0, /* OUI=0x00800f, Model#=0x0a */
@@ -147,7 +164,7 @@ static struct phy_driver lan911x_int_driver = {
147 /* basic functions */ 164 /* basic functions */
148 .config_aneg = genphy_config_aneg, 165 .config_aneg = genphy_config_aneg,
149 .read_status = genphy_read_status, 166 .read_status = genphy_read_status,
150 .config_init = smsc_phy_config_init, 167 .config_init = lan911x_config_init,
151 168
152 /* IRQ related */ 169 /* IRQ related */
153 .ack_interrupt = smsc_phy_ack_interrupt, 170 .ack_interrupt = smsc_phy_ack_interrupt,
diff --git a/drivers/net/ppp_generic.c b/drivers/net/ppp_generic.c
index 2282e729edb..6d61602208c 100644
--- a/drivers/net/ppp_generic.c
+++ b/drivers/net/ppp_generic.c
@@ -167,7 +167,7 @@ struct channel {
167 u8 avail; /* flag used in multilink stuff */ 167 u8 avail; /* flag used in multilink stuff */
168 u8 had_frag; /* >= 1 fragments have been sent */ 168 u8 had_frag; /* >= 1 fragments have been sent */
169 u32 lastseq; /* MP: last sequence # received */ 169 u32 lastseq; /* MP: last sequence # received */
170 int speed; /* speed of the corresponding ppp channel*/ 170 int speed; /* speed of the corresponding ppp channel*/
171#endif /* CONFIG_PPP_MULTILINK */ 171#endif /* CONFIG_PPP_MULTILINK */
172}; 172};
173 173
@@ -1293,13 +1293,13 @@ ppp_push(struct ppp *ppp)
1293 */ 1293 */
1294static int ppp_mp_explode(struct ppp *ppp, struct sk_buff *skb) 1294static int ppp_mp_explode(struct ppp *ppp, struct sk_buff *skb)
1295{ 1295{
1296 int len, totlen; 1296 int len, totlen;
1297 int i, bits, hdrlen, mtu; 1297 int i, bits, hdrlen, mtu;
1298 int flen; 1298 int flen;
1299 int navail, nfree, nzero; 1299 int navail, nfree, nzero;
1300 int nbigger; 1300 int nbigger;
1301 int totspeed; 1301 int totspeed;
1302 int totfree; 1302 int totfree;
1303 unsigned char *p, *q; 1303 unsigned char *p, *q;
1304 struct list_head *list; 1304 struct list_head *list;
1305 struct channel *pch; 1305 struct channel *pch;
@@ -1307,21 +1307,21 @@ static int ppp_mp_explode(struct ppp *ppp, struct sk_buff *skb)
1307 struct ppp_channel *chan; 1307 struct ppp_channel *chan;
1308 1308
1309 totspeed = 0; /*total bitrate of the bundle*/ 1309 totspeed = 0; /*total bitrate of the bundle*/
1310 nfree = 0; /* # channels which have no packet already queued */ 1310 nfree = 0; /* # channels which have no packet already queued */
1311 navail = 0; /* total # of usable channels (not deregistered) */ 1311 navail = 0; /* total # of usable channels (not deregistered) */
1312 nzero = 0; /* number of channels with zero speed associated*/ 1312 nzero = 0; /* number of channels with zero speed associated*/
1313 totfree = 0; /*total # of channels available and 1313 totfree = 0; /*total # of channels available and
1314 *having no queued packets before 1314 *having no queued packets before
1315 *starting the fragmentation*/ 1315 *starting the fragmentation*/
1316 1316
1317 hdrlen = (ppp->flags & SC_MP_XSHORTSEQ)? MPHDRLEN_SSN: MPHDRLEN; 1317 hdrlen = (ppp->flags & SC_MP_XSHORTSEQ)? MPHDRLEN_SSN: MPHDRLEN;
1318 i = 0; 1318 i = 0;
1319 list_for_each_entry(pch, &ppp->channels, clist) { 1319 list_for_each_entry(pch, &ppp->channels, clist) {
1320 navail += pch->avail = (pch->chan != NULL); 1320 navail += pch->avail = (pch->chan != NULL);
1321 pch->speed = pch->chan->speed; 1321 pch->speed = pch->chan->speed;
1322 if (pch->avail) { 1322 if (pch->avail) {
1323 if (skb_queue_empty(&pch->file.xq) || 1323 if (skb_queue_empty(&pch->file.xq) ||
1324 !pch->had_frag) { 1324 !pch->had_frag) {
1325 if (pch->speed == 0) 1325 if (pch->speed == 0)
1326 nzero++; 1326 nzero++;
1327 else 1327 else
@@ -1331,60 +1331,60 @@ static int ppp_mp_explode(struct ppp *ppp, struct sk_buff *skb)
1331 ++nfree; 1331 ++nfree;
1332 ++totfree; 1332 ++totfree;
1333 } 1333 }
1334 if (!pch->had_frag && i < ppp->nxchan) 1334 if (!pch->had_frag && i < ppp->nxchan)
1335 ppp->nxchan = i; 1335 ppp->nxchan = i;
1336 } 1336 }
1337 ++i; 1337 ++i;
1338 } 1338 }
1339 /* 1339 /*
1340 * Don't start sending this packet unless at least half of 1340 * Don't start sending this packet unless at least half of
1341 * the channels are free. This gives much better TCP 1341 * the channels are free. This gives much better TCP
1342 * performance if we have a lot of channels. 1342 * performance if we have a lot of channels.
1343 */ 1343 */
1344 if (nfree == 0 || nfree < navail / 2) 1344 if (nfree == 0 || nfree < navail / 2)
1345 return 0; /* can't take now, leave it in xmit_pending */ 1345 return 0; /* can't take now, leave it in xmit_pending */
1346 1346
1347 /* Do protocol field compression (XXX this should be optional) */ 1347 /* Do protocol field compression (XXX this should be optional) */
1348 p = skb->data; 1348 p = skb->data;
1349 len = skb->len; 1349 len = skb->len;
1350 if (*p == 0) { 1350 if (*p == 0) {
1351 ++p; 1351 ++p;
1352 --len; 1352 --len;
1353 } 1353 }
1354 1354
1355 totlen = len; 1355 totlen = len;
1356 nbigger = len % nfree; 1356 nbigger = len % nfree;
1357 1357
1358 /* skip to the channel after the one we last used 1358 /* skip to the channel after the one we last used
1359 and start at that one */ 1359 and start at that one */
1360 list = &ppp->channels; 1360 list = &ppp->channels;
1361 for (i = 0; i < ppp->nxchan; ++i) { 1361 for (i = 0; i < ppp->nxchan; ++i) {
1362 list = list->next; 1362 list = list->next;
1363 if (list == &ppp->channels) { 1363 if (list == &ppp->channels) {
1364 i = 0; 1364 i = 0;
1365 break; 1365 break;
1366 } 1366 }
1367 } 1367 }
1368 1368
1369 /* create a fragment for each channel */ 1369 /* create a fragment for each channel */
1370 bits = B; 1370 bits = B;
1371 while (len > 0) { 1371 while (len > 0) {
1372 list = list->next; 1372 list = list->next;
1373 if (list == &ppp->channels) { 1373 if (list == &ppp->channels) {
1374 i = 0; 1374 i = 0;
1375 continue; 1375 continue;
1376 } 1376 }
1377 pch = list_entry(list, struct channel, clist); 1377 pch = list_entry(list, struct channel, clist);
1378 ++i; 1378 ++i;
1379 if (!pch->avail) 1379 if (!pch->avail)
1380 continue; 1380 continue;
1381 1381
1382 /* 1382 /*
1383 * Skip this channel if it has a fragment pending already and 1383 * Skip this channel if it has a fragment pending already and
1384 * we haven't given a fragment to all of the free channels. 1384 * we haven't given a fragment to all of the free channels.
1385 */ 1385 */
1386 if (pch->avail == 1) { 1386 if (pch->avail == 1) {
1387 if (nfree > 0) 1387 if (nfree > 0)
1388 continue; 1388 continue;
1389 } else { 1389 } else {
1390 pch->avail = 1; 1390 pch->avail = 1;
@@ -1393,32 +1393,32 @@ static int ppp_mp_explode(struct ppp *ppp, struct sk_buff *skb)
1393 /* check the channel's mtu and whether it is still attached. */ 1393 /* check the channel's mtu and whether it is still attached. */
1394 spin_lock_bh(&pch->downl); 1394 spin_lock_bh(&pch->downl);
1395 if (pch->chan == NULL) { 1395 if (pch->chan == NULL) {
1396 /* can't use this channel, it's being deregistered */ 1396 /* can't use this channel, it's being deregistered */
1397 if (pch->speed == 0) 1397 if (pch->speed == 0)
1398 nzero--; 1398 nzero--;
1399 else 1399 else
1400 totspeed -= pch->speed; 1400 totspeed -= pch->speed;
1401 1401
1402 spin_unlock_bh(&pch->downl); 1402 spin_unlock_bh(&pch->downl);
1403 pch->avail = 0; 1403 pch->avail = 0;
1404 totlen = len; 1404 totlen = len;
1405 totfree--; 1405 totfree--;
1406 nfree--; 1406 nfree--;
1407 if (--navail == 0) 1407 if (--navail == 0)
1408 break; 1408 break;
1409 continue; 1409 continue;
1410 } 1410 }
1411 1411
1412 /* 1412 /*
1413 *if the channel speed is not set divide 1413 *if the channel speed is not set divide
1414 *the packet evenly among the free channels; 1414 *the packet evenly among the free channels;
1415 *otherwise divide it according to the speed 1415 *otherwise divide it according to the speed
1416 *of the channel we are going to transmit on 1416 *of the channel we are going to transmit on
1417 */ 1417 */
1418 flen = len; 1418 flen = len;
1419 if (nfree > 0) { 1419 if (nfree > 0) {
1420 if (pch->speed == 0) { 1420 if (pch->speed == 0) {
1421 flen = totlen/nfree ; 1421 flen = totlen/nfree;
1422 if (nbigger > 0) { 1422 if (nbigger > 0) {
1423 flen++; 1423 flen++;
1424 nbigger--; 1424 nbigger--;
@@ -1436,8 +1436,8 @@ static int ppp_mp_explode(struct ppp *ppp, struct sk_buff *skb)
1436 } 1436 }
1437 1437
1438 /* 1438 /*
1439 *check if we are on the last channel or 1439 *check if we are on the last channel or
1440 *we exceeded the length of the data to 1440 *we exceeded the length of the data to
1441 *fragment 1441 *fragment
1442 */ 1442 */
1443 if ((nfree <= 0) || (flen > len)) 1443 if ((nfree <= 0) || (flen > len))
@@ -1448,29 +1448,29 @@ static int ppp_mp_explode(struct ppp *ppp, struct sk_buff *skb)
1448 *above formula will be equal or less than zero. 1448 *above formula will be equal or less than zero.
1449 *Skip the channel in this case 1449 *Skip the channel in this case
1450 */ 1450 */
1451 if (flen <= 0) { 1451 if (flen <= 0) {
1452 pch->avail = 2; 1452 pch->avail = 2;
1453 spin_unlock_bh(&pch->downl); 1453 spin_unlock_bh(&pch->downl);
1454 continue; 1454 continue;
1455 } 1455 }
1456 1456
1457 mtu = pch->chan->mtu - hdrlen; 1457 mtu = pch->chan->mtu - hdrlen;
1458 if (mtu < 4) 1458 if (mtu < 4)
1459 mtu = 4; 1459 mtu = 4;
1460 if (flen > mtu) 1460 if (flen > mtu)
1461 flen = mtu; 1461 flen = mtu;
1462 if (flen == len) 1462 if (flen == len)
1463 bits |= E; 1463 bits |= E;
1464 frag = alloc_skb(flen + hdrlen + (flen == 0), GFP_ATOMIC); 1464 frag = alloc_skb(flen + hdrlen + (flen == 0), GFP_ATOMIC);
1465 if (!frag) 1465 if (!frag)
1466 goto noskb; 1466 goto noskb;
1467 q = skb_put(frag, flen + hdrlen); 1467 q = skb_put(frag, flen + hdrlen);
1468 1468
1469 /* make the MP header */ 1469 /* make the MP header */
1470 q[0] = PPP_MP >> 8; 1470 q[0] = PPP_MP >> 8;
1471 q[1] = PPP_MP; 1471 q[1] = PPP_MP;
1472 if (ppp->flags & SC_MP_XSHORTSEQ) { 1472 if (ppp->flags & SC_MP_XSHORTSEQ) {
1473 q[2] = bits + ((ppp->nxseq >> 8) & 0xf); 1473 q[2] = bits + ((ppp->nxseq >> 8) & 0xf);
1474 q[3] = ppp->nxseq; 1474 q[3] = ppp->nxseq;
1475 } else { 1475 } else {
1476 q[2] = bits; 1476 q[2] = bits;
@@ -1483,24 +1483,24 @@ static int ppp_mp_explode(struct ppp *ppp, struct sk_buff *skb)
1483 1483
1484 /* try to send it down the channel */ 1484 /* try to send it down the channel */
1485 chan = pch->chan; 1485 chan = pch->chan;
1486 if (!skb_queue_empty(&pch->file.xq) || 1486 if (!skb_queue_empty(&pch->file.xq) ||
1487 !chan->ops->start_xmit(chan, frag)) 1487 !chan->ops->start_xmit(chan, frag))
1488 skb_queue_tail(&pch->file.xq, frag); 1488 skb_queue_tail(&pch->file.xq, frag);
1489 pch->had_frag = 1; 1489 pch->had_frag = 1;
1490 p += flen; 1490 p += flen;
1491 len -= flen; 1491 len -= flen;
1492 ++ppp->nxseq; 1492 ++ppp->nxseq;
1493 bits = 0; 1493 bits = 0;
1494 spin_unlock_bh(&pch->downl); 1494 spin_unlock_bh(&pch->downl);
1495 } 1495 }
1496 ppp->nxchan = i; 1496 ppp->nxchan = i;
1497 1497
1498 return 1; 1498 return 1;
1499 1499
1500 noskb: 1500 noskb:
1501 spin_unlock_bh(&pch->downl); 1501 spin_unlock_bh(&pch->downl);
1502 if (ppp->debug & 1) 1502 if (ppp->debug & 1)
1503 printk(KERN_ERR "PPP: no memory (fragment)\n"); 1503 printk(KERN_ERR "PPP: no memory (fragment)\n");
1504 ++ppp->dev->stats.tx_errors; 1504 ++ppp->dev->stats.tx_errors;
1505 ++ppp->nxseq; 1505 ++ppp->nxseq;
1506 return 1; /* abandon the frame */ 1506 return 1; /* abandon the frame */
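Illustrative aside, not part of the patch: in the equal-split branch shown above, each free channel whose speed is unset is assigned flen = totlen/nfree bytes, and the division remainder is handed out one byte at a time through nbigger (set earlier as nbigger = len % nfree). With hypothetical numbers, totlen = 1502 and nfree = 4 give nbigger = 2, so the first channel considered gets 1502/4 + 1 = 376 bytes and nbigger drops to 1; the hdrlen-byte MP header is then added on top of each fragment before it is handed to the channel's start_xmit.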
diff --git a/drivers/net/qla3xxx.c b/drivers/net/qla3xxx.c
index dd35066a7f8..4ef0afbcbe1 100644
--- a/drivers/net/qla3xxx.c
+++ b/drivers/net/qla3xxx.c
@@ -61,7 +61,7 @@ static int msi;
61module_param(msi, int, 0); 61module_param(msi, int, 0);
62MODULE_PARM_DESC(msi, "Turn on Message Signaled Interrupts."); 62MODULE_PARM_DESC(msi, "Turn on Message Signaled Interrupts.");
63 63
64static struct pci_device_id ql3xxx_pci_tbl[] __devinitdata = { 64static DEFINE_PCI_DEVICE_TABLE(ql3xxx_pci_tbl) = {
65 {PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QL3022_DEVICE_ID)}, 65 {PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QL3022_DEVICE_ID)},
66 {PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QL3032_DEVICE_ID)}, 66 {PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QL3032_DEVICE_ID)},
67 /* required last entry */ 67 /* required last entry */
@@ -4087,7 +4087,6 @@ static void __devexit ql3xxx_remove(struct pci_dev *pdev)
4087 struct ql3_adapter *qdev = netdev_priv(ndev); 4087 struct ql3_adapter *qdev = netdev_priv(ndev);
4088 4088
4089 unregister_netdev(ndev); 4089 unregister_netdev(ndev);
4090 qdev = netdev_priv(ndev);
4091 4090
4092 ql_disable_interrupts(qdev); 4091 ql_disable_interrupts(qdev);
4093 4092
diff --git a/drivers/net/qlcnic/Makefile b/drivers/net/qlcnic/Makefile
new file mode 100644
index 00000000000..ddba83ef3f4
--- /dev/null
+++ b/drivers/net/qlcnic/Makefile
@@ -0,0 +1,8 @@
1#
2# Makefile for Qlogic 1G/10G Ethernet Driver for CNA devices
3#
4
5obj-$(CONFIG_QLCNIC) := qlcnic.o
6
7qlcnic-y := qlcnic_hw.o qlcnic_main.o qlcnic_init.o \
8 qlcnic_ethtool.o qlcnic_ctx.o
diff --git a/drivers/net/qlcnic/qlcnic.h b/drivers/net/qlcnic/qlcnic.h
new file mode 100644
index 00000000000..b40a851ec7d
--- /dev/null
+++ b/drivers/net/qlcnic/qlcnic.h
@@ -0,0 +1,1126 @@
1/*
2 * Copyright (C) 2009 - QLogic Corporation.
3 * All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License
7 * as published by the Free Software Foundation; either version 2
8 * of the License, or (at your option) any later version.
9 *
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 59 Temple Place - Suite 330, Boston,
18 * MA 02111-1307, USA.
19 *
20 * The full GNU General Public License is included in this distribution
21 * in the file called "COPYING".
22 *
23 */
24
25#ifndef _QLCNIC_H_
26#define _QLCNIC_H_
27
28#include <linux/module.h>
29#include <linux/kernel.h>
30#include <linux/types.h>
31#include <linux/ioport.h>
32#include <linux/pci.h>
33#include <linux/netdevice.h>
34#include <linux/etherdevice.h>
35#include <linux/ip.h>
36#include <linux/in.h>
37#include <linux/tcp.h>
38#include <linux/skbuff.h>
39#include <linux/firmware.h>
40
41#include <linux/ethtool.h>
42#include <linux/mii.h>
43#include <linux/timer.h>
44
45#include <linux/vmalloc.h>
46
47#include <linux/io.h>
48#include <asm/byteorder.h>
49
50#include "qlcnic_hdr.h"
51
52#define _QLCNIC_LINUX_MAJOR 5
53#define _QLCNIC_LINUX_MINOR 0
54#define _QLCNIC_LINUX_SUBVERSION 0
55#define QLCNIC_LINUX_VERSIONID "5.0.0"
56
57#define QLCNIC_VERSION_CODE(a, b, c) (((a) << 24) + ((b) << 16) + (c))
58#define _major(v) (((v) >> 24) & 0xff)
59#define _minor(v) (((v) >> 16) & 0xff)
60#define _build(v) ((v) & 0xffff)
61
62/* version in image has weird encoding:
63 * 7:0 - major
64 * 15:8 - minor
65 * 31:16 - build (little endian)
66 */
67#define QLCNIC_DECODE_VERSION(v) \
68 QLCNIC_VERSION_CODE(((v) & 0xff), (((v) >> 8) & 0xff), ((v) >> 16))
69
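Illustrative aside, not part of the new file: a worked decode of a hypothetical raw image dword shows the byte shuffling done by QLCNIC_DECODE_VERSION:

    /* hypothetical raw value v = 0x00010005: major 0x05, minor 0x00, build 0x0001
     *   QLCNIC_DECODE_VERSION(0x00010005)
     *     = QLCNIC_VERSION_CODE(0x05, 0x00, 0x0001)
     *     = (0x05 << 24) + (0x00 << 16) + 0x0001
     *     = 0x05000001
     *   _major() = 5, _minor() = 0, _build() = 1, i.e. version 5.0, build 1
     */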
70#define QLCNIC_NUM_FLASH_SECTORS (64)
71#define QLCNIC_FLASH_SECTOR_SIZE (64 * 1024)
72#define QLCNIC_FLASH_TOTAL_SIZE (QLCNIC_NUM_FLASH_SECTORS \
73 * QLCNIC_FLASH_SECTOR_SIZE)
74
75#define RCV_DESC_RINGSIZE(rds_ring) \
76 (sizeof(struct rcv_desc) * (rds_ring)->num_desc)
77#define RCV_BUFF_RINGSIZE(rds_ring) \
78 (sizeof(struct qlcnic_rx_buffer) * rds_ring->num_desc)
79#define STATUS_DESC_RINGSIZE(sds_ring) \
80 (sizeof(struct status_desc) * (sds_ring)->num_desc)
81#define TX_BUFF_RINGSIZE(tx_ring) \
82 (sizeof(struct qlcnic_cmd_buffer) * tx_ring->num_desc)
83#define TX_DESC_RINGSIZE(tx_ring) \
84 (sizeof(struct cmd_desc_type0) * tx_ring->num_desc)
85
86#define QLCNIC_P3P_A0 0x50
87
88#define QLCNIC_IS_REVISION_P3P(REVISION) (REVISION >= QLCNIC_P3P_A0)
89
90#define FIRST_PAGE_GROUP_START 0
91#define FIRST_PAGE_GROUP_END 0x100000
92
93#define P3_MAX_MTU (9600)
94#define QLCNIC_MAX_ETHERHDR 32 /* This contains some padding */
95
96#define QLCNIC_P3_RX_BUF_MAX_LEN (QLCNIC_MAX_ETHERHDR + ETH_DATA_LEN)
97#define QLCNIC_P3_RX_JUMBO_BUF_MAX_LEN (QLCNIC_MAX_ETHERHDR + P3_MAX_MTU)
98#define QLCNIC_CT_DEFAULT_RX_BUF_LEN 2048
99#define QLCNIC_LRO_BUFFER_EXTRA 2048
100
101#define QLCNIC_RX_LRO_BUFFER_LENGTH (8060)
102
103/* Opcodes to be used with the commands */
104#define TX_ETHER_PKT 0x01
105#define TX_TCP_PKT 0x02
106#define TX_UDP_PKT 0x03
107#define TX_IP_PKT 0x04
108#define TX_TCP_LSO 0x05
109#define TX_TCP_LSO6 0x06
110#define TX_IPSEC 0x07
111#define TX_IPSEC_CMD 0x0a
112#define TX_TCPV6_PKT 0x0b
113#define TX_UDPV6_PKT 0x0c
114
115/* Tx defines */
116#define MAX_BUFFERS_PER_CMD 32
117#define TX_STOP_THRESH ((MAX_SKB_FRAGS >> 2) + 4)
118#define QLCNIC_MAX_TX_TIMEOUTS 2
119
120/*
121 * Following are the states of the Phantom. Phantom will set them and
122 * Host will read to check if the fields are correct.
123 */
124#define PHAN_INITIALIZE_FAILED 0xffff
125#define PHAN_INITIALIZE_COMPLETE 0xff01
126
127/* Host writes the following to notify that it has done the init-handshake */
128#define PHAN_INITIALIZE_ACK 0xf00f
129#define PHAN_PEG_RCV_INITIALIZED 0xff01
130
131#define NUM_RCV_DESC_RINGS 3
132#define NUM_STS_DESC_RINGS 4
133
134#define RCV_RING_NORMAL 0
135#define RCV_RING_JUMBO 1
136#define RCV_RING_LRO 2
137
138#define MIN_CMD_DESCRIPTORS 64
139#define MIN_RCV_DESCRIPTORS 64
140#define MIN_JUMBO_DESCRIPTORS 32
141
142#define MAX_CMD_DESCRIPTORS 1024
143#define MAX_RCV_DESCRIPTORS_1G 4096
144#define MAX_RCV_DESCRIPTORS_10G 8192
145#define MAX_JUMBO_RCV_DESCRIPTORS_1G 512
146#define MAX_JUMBO_RCV_DESCRIPTORS_10G 1024
147#define MAX_LRO_RCV_DESCRIPTORS 8
148
149#define DEFAULT_RCV_DESCRIPTORS_1G 2048
150#define DEFAULT_RCV_DESCRIPTORS_10G 4096
151
152#define get_next_index(index, length) \
153 (((index) + 1) & ((length) - 1))
154
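Illustrative aside, not part of the new file: get_next_index() relies on the ring length being a power of two, so the increment wraps through the mask rather than a modulo; e.g. get_next_index(1023, 1024) = 1024 & 1023 = 0.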
155#define MPORT_MULTI_FUNCTION_MODE 0x2222
156
157/*
158 * Following data structures describe the descriptors that will be used.
159 * Added fields of tcpHdrSize and ipHdrSize. The driver needs to do it only when
160 * we are doing LSO (above the 1500 size packet) only.
161 */
162
163#define FLAGS_VLAN_TAGGED 0x10
164#define FLAGS_VLAN_OOB 0x40
165
166#define qlcnic_set_tx_vlan_tci(cmd_desc, v) \
167 (cmd_desc)->vlan_TCI = cpu_to_le16(v);
168#define qlcnic_set_cmd_desc_port(cmd_desc, var) \
169 ((cmd_desc)->port_ctxid |= ((var) & 0x0F))
170#define qlcnic_set_cmd_desc_ctxid(cmd_desc, var) \
171 ((cmd_desc)->port_ctxid |= ((var) << 4 & 0xF0))
172
173#define qlcnic_set_tx_port(_desc, _port) \
174 ((_desc)->port_ctxid = ((_port) & 0xf) | (((_port) << 4) & 0xf0))
175
176#define qlcnic_set_tx_flags_opcode(_desc, _flags, _opcode) \
177 ((_desc)->flags_opcode = \
178 cpu_to_le16(((_flags) & 0x7f) | (((_opcode) & 0x3f) << 7)))
179
180#define qlcnic_set_tx_frags_len(_desc, _frags, _len) \
181 ((_desc)->nfrags__length = \
182 cpu_to_le32(((_frags) & 0xff) | (((_len) & 0xffffff) << 8)))
183
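Illustrative aside, not part of the new file: the packing helpers above fold two values into a single little-endian field. With hypothetical arguments, qlcnic_set_tx_frags_len(desc, 3, 1514) stores cpu_to_le32(3 | (1514 << 8)) = cpu_to_le32(0x0005ea03), and qlcnic_set_tx_flags_opcode(desc, FLAGS_VLAN_OOB, TX_TCP_LSO) stores cpu_to_le16(0x40 | (0x05 << 7)) = cpu_to_le16(0x02c0), matching the "15:13 unused, 12:7 opcode, 6:0 flags" layout noted in cmd_desc_type0 below.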
184struct cmd_desc_type0 {
185 u8 tcp_hdr_offset; /* For LSO only */
186 u8 ip_hdr_offset; /* For LSO only */
187 __le16 flags_opcode; /* 15:13 unused, 12:7 opcode, 6:0 flags */
188 __le32 nfrags__length; /* 31:8 total len, 7:0 frag count */
189
190 __le64 addr_buffer2;
191
192 __le16 reference_handle;
193 __le16 mss;
194 u8 port_ctxid; /* 7:4 ctxid 3:0 port */
195 u8 total_hdr_length; /* LSO only : MAC+IP+TCP Hdr size */
196 __le16 conn_id; /* IPSec offload only */
197
198 __le64 addr_buffer3;
199 __le64 addr_buffer1;
200
201 __le16 buffer_length[4];
202
203 __le64 addr_buffer4;
204
205 __le32 reserved2;
206 __le16 reserved;
207 __le16 vlan_TCI;
208
209} __attribute__ ((aligned(64)));
210
211/* Note: sizeof(rcv_desc) should always be a multiple of 2 */
212struct rcv_desc {
213 __le16 reference_handle;
214 __le16 reserved;
215 __le32 buffer_length; /* allocated buffer length (usually 2K) */
216 __le64 addr_buffer;
217};
218
219/* opcode field in status_desc */
220#define QLCNIC_SYN_OFFLOAD 0x03
221#define QLCNIC_RXPKT_DESC 0x04
222#define QLCNIC_OLD_RXPKT_DESC 0x3f
223#define QLCNIC_RESPONSE_DESC 0x05
224#define QLCNIC_LRO_DESC 0x12
225
226/* for status field in status_desc */
227#define STATUS_CKSUM_OK (2)
228
229/* owner bits of status_desc */
230#define STATUS_OWNER_HOST (0x1ULL << 56)
231#define STATUS_OWNER_PHANTOM (0x2ULL << 56)
232
233/* Status descriptor:
234 0-3 port, 4-7 status, 8-11 type, 12-27 total_length
235 28-43 reference_handle, 44-47 protocol, 48-52 pkt_offset
236 53-55 desc_cnt, 56-57 owner, 58-63 opcode
237 */
238#define qlcnic_get_sts_port(sts_data) \
239 ((sts_data) & 0x0F)
240#define qlcnic_get_sts_status(sts_data) \
241 (((sts_data) >> 4) & 0x0F)
242#define qlcnic_get_sts_type(sts_data) \
243 (((sts_data) >> 8) & 0x0F)
244#define qlcnic_get_sts_totallength(sts_data) \
245 (((sts_data) >> 12) & 0xFFFF)
246#define qlcnic_get_sts_refhandle(sts_data) \
247 (((sts_data) >> 28) & 0xFFFF)
248#define qlcnic_get_sts_prot(sts_data) \
249 (((sts_data) >> 44) & 0x0F)
250#define qlcnic_get_sts_pkt_offset(sts_data) \
251 (((sts_data) >> 48) & 0x1F)
252#define qlcnic_get_sts_desc_cnt(sts_data) \
253 (((sts_data) >> 53) & 0x7)
254#define qlcnic_get_sts_opcode(sts_data) \
255 (((sts_data) >> 58) & 0x03F)
256
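Illustrative aside, not part of the new file: applying the extractors above to a hypothetical sts_data of 0x11206001005EE021 yields port 1, status 2 (STATUS_CKSUM_OK), type 0, total_length 0x05EE (1518), reference_handle 0x0010, protocol 6, pkt_offset 0, desc_cnt 1, owner 0x1 (STATUS_OWNER_HOST) and opcode 0x04 (QLCNIC_RXPKT_DESC), matching the bit layout in the comment above.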
257#define qlcnic_get_lro_sts_refhandle(sts_data) \
258 ((sts_data) & 0x0FFFF)
259#define qlcnic_get_lro_sts_length(sts_data) \
260 (((sts_data) >> 16) & 0x0FFFF)
261#define qlcnic_get_lro_sts_l2_hdr_offset(sts_data) \
262 (((sts_data) >> 32) & 0x0FF)
263#define qlcnic_get_lro_sts_l4_hdr_offset(sts_data) \
264 (((sts_data) >> 40) & 0x0FF)
265#define qlcnic_get_lro_sts_timestamp(sts_data) \
266 (((sts_data) >> 48) & 0x1)
267#define qlcnic_get_lro_sts_type(sts_data) \
268 (((sts_data) >> 49) & 0x7)
269#define qlcnic_get_lro_sts_push_flag(sts_data) \
270 (((sts_data) >> 52) & 0x1)
271#define qlcnic_get_lro_sts_seq_number(sts_data) \
272 ((sts_data) & 0x0FFFFFFFF)
273
274
275struct status_desc {
276 __le64 status_desc_data[2];
277} __attribute__ ((aligned(16)));
278
279/* UNIFIED ROMIMAGE */
280#define QLCNIC_UNI_FW_MIN_SIZE 0xc8000
281#define QLCNIC_UNI_DIR_SECT_PRODUCT_TBL 0x0
282#define QLCNIC_UNI_DIR_SECT_BOOTLD 0x6
283#define QLCNIC_UNI_DIR_SECT_FW 0x7
284
285/*Offsets */
286#define QLCNIC_UNI_CHIP_REV_OFF 10
287#define QLCNIC_UNI_FLAGS_OFF 11
288#define QLCNIC_UNI_BIOS_VERSION_OFF 12
289#define QLCNIC_UNI_BOOTLD_IDX_OFF 27
290#define QLCNIC_UNI_FIRMWARE_IDX_OFF 29
291
292struct uni_table_desc{
293 u32 findex;
294 u32 num_entries;
295 u32 entry_size;
296 u32 reserved[5];
297};
298
299struct uni_data_desc{
300 u32 findex;
301 u32 size;
302 u32 reserved[5];
303};
304
305/* Magic number to let user know flash is programmed */
306#define QLCNIC_BDINFO_MAGIC 0x12345678
307
308#define QLCNIC_BRDTYPE_P3_REF_QG 0x0021
309#define QLCNIC_BRDTYPE_P3_HMEZ 0x0022
310#define QLCNIC_BRDTYPE_P3_10G_CX4_LP 0x0023
311#define QLCNIC_BRDTYPE_P3_4_GB 0x0024
312#define QLCNIC_BRDTYPE_P3_IMEZ 0x0025
313#define QLCNIC_BRDTYPE_P3_10G_SFP_PLUS 0x0026
314#define QLCNIC_BRDTYPE_P3_10000_BASE_T 0x0027
315#define QLCNIC_BRDTYPE_P3_XG_LOM 0x0028
316#define QLCNIC_BRDTYPE_P3_4_GB_MM 0x0029
317#define QLCNIC_BRDTYPE_P3_10G_SFP_CT 0x002a
318#define QLCNIC_BRDTYPE_P3_10G_SFP_QT 0x002b
319#define QLCNIC_BRDTYPE_P3_10G_CX4 0x0031
320#define QLCNIC_BRDTYPE_P3_10G_XFP 0x0032
321#define QLCNIC_BRDTYPE_P3_10G_TP 0x0080
322
323/* Flash memory map */
324#define QLCNIC_BRDCFG_START 0x4000 /* board config */
325#define QLCNIC_BOOTLD_START 0x10000 /* bootld */
326#define QLCNIC_IMAGE_START 0x43000 /* compressed image */
327#define QLCNIC_USER_START 0x3E8000 /* Firmware info */
328
329#define QLCNIC_FW_VERSION_OFFSET (QLCNIC_USER_START+0x408)
330#define QLCNIC_FW_SIZE_OFFSET (QLCNIC_USER_START+0x40c)
331#define QLCNIC_FW_SERIAL_NUM_OFFSET (QLCNIC_USER_START+0x81c)
332#define QLCNIC_BIOS_VERSION_OFFSET (QLCNIC_USER_START+0x83c)
333
334#define QLCNIC_BRDTYPE_OFFSET (QLCNIC_BRDCFG_START+0x8)
335#define QLCNIC_FW_MAGIC_OFFSET (QLCNIC_BRDCFG_START+0x128)
336
337#define QLCNIC_FW_MIN_SIZE (0x3fffff)
338#define QLCNIC_UNIFIED_ROMIMAGE 0
339#define QLCNIC_FLASH_ROMIMAGE 1
340#define QLCNIC_UNKNOWN_ROMIMAGE 0xff
341
342#define QLCNIC_UNIFIED_ROMIMAGE_NAME "phanfw.bin"
343#define QLCNIC_FLASH_ROMIMAGE_NAME "flash"
344
345extern char qlcnic_driver_name[];
346
347/* Number of status descriptors to handle per interrupt */
348#define MAX_STATUS_HANDLE (64)
349
350/*
351 * qlcnic_skb_frag{} contains mapping info for each SG list. This
352 * has to be freed when DMA is complete. This is part of qlcnic_cmd_buffer{}.
353 */
354struct qlcnic_skb_frag {
355 u64 dma;
356 u64 length;
357};
358
359struct qlcnic_recv_crb {
360 u32 crb_rcv_producer[NUM_RCV_DESC_RINGS];
361 u32 crb_sts_consumer[NUM_STS_DESC_RINGS];
362 u32 sw_int_mask[NUM_STS_DESC_RINGS];
363};
364
365/* Following defines are for the state of the buffers */
366#define QLCNIC_BUFFER_FREE 0
367#define QLCNIC_BUFFER_BUSY 1
368
369/*
370 * There will be one qlcnic_buffer per skb packet. These will be
371 * used to save the dma info for pci_unmap_page()
372 */
373struct qlcnic_cmd_buffer {
374 struct sk_buff *skb;
375 struct qlcnic_skb_frag frag_array[MAX_BUFFERS_PER_CMD + 1];
376 u32 frag_count;
377};
378
379/* In rx_buffer, we do not need multiple fragments as it is a single buffer */
380struct qlcnic_rx_buffer {
381 struct list_head list;
382 struct sk_buff *skb;
383 u64 dma;
384 u16 ref_handle;
385 u16 state;
386};
387
388/* Board types */
389#define QLCNIC_GBE 0x01
390#define QLCNIC_XGBE 0x02
391
392/*
393 * One hardware_context{} per adapter
394 * contains interrupt info as well shared hardware info.
395 */
396struct qlcnic_hardware_context {
397 void __iomem *pci_base0;
398 void __iomem *ocm_win_crb;
399
400 unsigned long pci_len0;
401
402 u32 ocm_win;
403 u32 crb_win;
404
405 rwlock_t crb_lock;
406 struct mutex mem_lock;
407
408 u8 cut_through;
409 u8 revision_id;
410 u8 pci_func;
411 u8 linkup;
412 u16 port_type;
413 u16 board_type;
414};
415
416struct qlcnic_adapter_stats {
417 u64 xmitcalled;
418 u64 xmitfinished;
419 u64 rxdropped;
420 u64 txdropped;
421 u64 csummed;
422 u64 rx_pkts;
423 u64 lro_pkts;
424 u64 rxbytes;
425 u64 txbytes;
426};
427
428/*
429 * Rcv Descriptor Context. One such per Rcv Descriptor. There may
430 * be one Rcv Descriptor for normal packets, one for jumbo and may be others.
431 */
432struct qlcnic_host_rds_ring {
433 u32 producer;
434 u32 num_desc;
435 u32 dma_size;
436 u32 skb_size;
437 u32 flags;
438 void __iomem *crb_rcv_producer;
439 struct rcv_desc *desc_head;
440 struct qlcnic_rx_buffer *rx_buf_arr;
441 struct list_head free_list;
442 spinlock_t lock;
443 dma_addr_t phys_addr;
444};
445
446struct qlcnic_host_sds_ring {
447 u32 consumer;
448 u32 num_desc;
449 void __iomem *crb_sts_consumer;
450 void __iomem *crb_intr_mask;
451
452 struct status_desc *desc_head;
453 struct qlcnic_adapter *adapter;
454 struct napi_struct napi;
455 struct list_head free_list[NUM_RCV_DESC_RINGS];
456
457 int irq;
458
459 dma_addr_t phys_addr;
460 char name[IFNAMSIZ+4];
461};
462
463struct qlcnic_host_tx_ring {
464 u32 producer;
465 __le32 *hw_consumer;
466 u32 sw_consumer;
467 void __iomem *crb_cmd_producer;
468 u32 num_desc;
469
470 struct netdev_queue *txq;
471
472 struct qlcnic_cmd_buffer *cmd_buf_arr;
473 struct cmd_desc_type0 *desc_head;
474 dma_addr_t phys_addr;
475 dma_addr_t hw_cons_phys_addr;
476};
477
478/*
479 * Receive context. There is one such structure per instance of the
480 * receive processing. Any state information that is relevant to
481 * the receive must be in this structure. The global data may be
482 * present elsewhere.
483 */
484struct qlcnic_recv_context {
485 u32 state;
486 u16 context_id;
487 u16 virt_port;
488
489 struct qlcnic_host_rds_ring *rds_rings;
490 struct qlcnic_host_sds_ring *sds_rings;
491};
492
493/* HW context creation */
494
495#define QLCNIC_OS_CRB_RETRY_COUNT 4000
496#define QLCNIC_CDRP_SIGNATURE_MAKE(pcifn, version) \
497 (((pcifn) & 0xff) | (((version) & 0xff) << 8) | (0xcafe << 16))
498
499#define QLCNIC_CDRP_CMD_BIT 0x80000000
500
501/*
502 * All responses must have the QLCNIC_CDRP_CMD_BIT cleared
503 * in the crb QLCNIC_CDRP_CRB_OFFSET.
504 */
505#define QLCNIC_CDRP_FORM_RSP(rsp) (rsp)
506#define QLCNIC_CDRP_IS_RSP(rsp) (((rsp) & QLCNIC_CDRP_CMD_BIT) == 0)
507
508#define QLCNIC_CDRP_RSP_OK 0x00000001
509#define QLCNIC_CDRP_RSP_FAIL 0x00000002
510#define QLCNIC_CDRP_RSP_TIMEOUT 0x00000003
511
512/*
513 * All commands must have the QLCNIC_CDRP_CMD_BIT set in
514 * the crb QLCNIC_CDRP_CRB_OFFSET.
515 */
516#define QLCNIC_CDRP_FORM_CMD(cmd) (QLCNIC_CDRP_CMD_BIT | (cmd))
517#define QLCNIC_CDRP_IS_CMD(cmd) (((cmd) & QLCNIC_CDRP_CMD_BIT) != 0)
518
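Illustrative aside, not part of the new file: with QLCNIC_CDRP_CMD_BIT being bit 31, QLCNIC_CDRP_FORM_CMD(QLCNIC_CDRP_CMD_SET_MTU) yields 0x80000012, which QLCNIC_CDRP_IS_CMD() accepts; a firmware response such as QLCNIC_CDRP_RSP_OK (0x00000001) has that bit clear, so QLCNIC_CDRP_IS_RSP() is true for it.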
519#define QLCNIC_CDRP_CMD_SUBMIT_CAPABILITIES 0x00000001
520#define QLCNIC_CDRP_CMD_READ_MAX_RDS_PER_CTX 0x00000002
521#define QLCNIC_CDRP_CMD_READ_MAX_SDS_PER_CTX 0x00000003
522#define QLCNIC_CDRP_CMD_READ_MAX_RULES_PER_CTX 0x00000004
523#define QLCNIC_CDRP_CMD_READ_MAX_RX_CTX 0x00000005
524#define QLCNIC_CDRP_CMD_READ_MAX_TX_CTX 0x00000006
525#define QLCNIC_CDRP_CMD_CREATE_RX_CTX 0x00000007
526#define QLCNIC_CDRP_CMD_DESTROY_RX_CTX 0x00000008
527#define QLCNIC_CDRP_CMD_CREATE_TX_CTX 0x00000009
528#define QLCNIC_CDRP_CMD_DESTROY_TX_CTX 0x0000000a
529#define QLCNIC_CDRP_CMD_SETUP_STATISTICS 0x0000000e
530#define QLCNIC_CDRP_CMD_GET_STATISTICS 0x0000000f
531#define QLCNIC_CDRP_CMD_DELETE_STATISTICS 0x00000010
532#define QLCNIC_CDRP_CMD_SET_MTU 0x00000012
533#define QLCNIC_CDRP_CMD_READ_PHY 0x00000013
534#define QLCNIC_CDRP_CMD_WRITE_PHY 0x00000014
535#define QLCNIC_CDRP_CMD_READ_HW_REG 0x00000015
536#define QLCNIC_CDRP_CMD_GET_FLOW_CTL 0x00000016
537#define QLCNIC_CDRP_CMD_SET_FLOW_CTL 0x00000017
538#define QLCNIC_CDRP_CMD_READ_MAX_MTU 0x00000018
539#define QLCNIC_CDRP_CMD_READ_MAX_LRO 0x00000019
540#define QLCNIC_CDRP_CMD_CONFIGURE_TOE 0x0000001a
541#define QLCNIC_CDRP_CMD_FUNC_ATTRIB 0x0000001b
542#define QLCNIC_CDRP_CMD_READ_PEXQ_PARAMETERS 0x0000001c
543#define QLCNIC_CDRP_CMD_GET_LIC_CAPABILITIES 0x0000001d
544#define QLCNIC_CDRP_CMD_READ_MAX_LRO_PER_BOARD 0x0000001e
545#define QLCNIC_CDRP_CMD_MAX 0x0000001f
546
547#define QLCNIC_RCODE_SUCCESS 0
548#define QLCNIC_RCODE_TIMEOUT 17
549#define QLCNIC_DESTROY_CTX_RESET 0
550
551/*
552 * Capabilities Announced
553 */
554#define QLCNIC_CAP0_LEGACY_CONTEXT (1)
555#define QLCNIC_CAP0_LEGACY_MN (1 << 2)
556#define QLCNIC_CAP0_LSO (1 << 6)
557#define QLCNIC_CAP0_JUMBO_CONTIGUOUS (1 << 7)
558#define QLCNIC_CAP0_LRO_CONTIGUOUS (1 << 8)
559
560/*
561 * Context state
562 */
563#define QLCHAL_VERSION 1
564
565#define QLCNIC_HOST_CTX_STATE_ACTIVE 2
566
567/*
568 * Rx context
569 */
570
571struct qlcnic_hostrq_sds_ring {
572 __le64 host_phys_addr; /* Ring base addr */
573 __le32 ring_size; /* Ring entries */
574 __le16 msi_index;
575 __le16 rsvd; /* Padding */
576};
577
578struct qlcnic_hostrq_rds_ring {
579 __le64 host_phys_addr; /* Ring base addr */
580 __le64 buff_size; /* Packet buffer size */
581 __le32 ring_size; /* Ring entries */
582 __le32 ring_kind; /* Class of ring */
583};
584
585struct qlcnic_hostrq_rx_ctx {
586 __le64 host_rsp_dma_addr; /* Response dma'd here */
587 __le32 capabilities[4]; /* Flag bit vector */
588 __le32 host_int_crb_mode; /* Interrupt crb usage */
589 __le32 host_rds_crb_mode; /* RDS crb usage */
590 /* These ring offsets are relative to data[0] below */
591 __le32 rds_ring_offset; /* Offset to RDS config */
592 __le32 sds_ring_offset; /* Offset to SDS config */
593 __le16 num_rds_rings; /* Count of RDS rings */
594 __le16 num_sds_rings; /* Count of SDS rings */
595 __le16 rsvd1; /* Padding */
596 __le16 rsvd2; /* Padding */
597 u8 reserved[128]; /* reserve space for future expansion */
598 /* MUST BE 64-bit aligned.
599 The following is packed:
600 - N hostrq_rds_rings
601 - N hostrq_sds_rings */
602 char data[0];
603};
604
605struct qlcnic_cardrsp_rds_ring{
606 __le32 host_producer_crb; /* Crb to use */
607 __le32 rsvd1; /* Padding */
608};
609
610struct qlcnic_cardrsp_sds_ring {
611 __le32 host_consumer_crb; /* Crb to use */
612 __le32 interrupt_crb; /* Crb to use */
613};
614
615struct qlcnic_cardrsp_rx_ctx {
616 /* These ring offsets are relative to data[0] below */
617 __le32 rds_ring_offset; /* Offset to RDS config */
618 __le32 sds_ring_offset; /* Offset to SDS config */
619 __le32 host_ctx_state; /* Starting State */
620 __le32 num_fn_per_port; /* How many PCI fn share the port */
621 __le16 num_rds_rings; /* Count of RDS rings */
622 __le16 num_sds_rings; /* Count of SDS rings */
623 __le16 context_id; /* Handle for context */
624 u8 phys_port; /* Physical id of port */
625 u8 virt_port; /* Virtual/Logical id of port */
626 u8 reserved[128]; /* save space for future expansion */
627 /* MUST BE 64-bit aligned.
628 The following is packed:
629 - N cardrsp_rds_rings
630 - N cardrsp_sds_rings */
631 char data[0];
632};
633
634#define SIZEOF_HOSTRQ_RX(HOSTRQ_RX, rds_rings, sds_rings) \
635 (sizeof(HOSTRQ_RX) + \
636 (rds_rings)*(sizeof(struct qlcnic_hostrq_rds_ring)) + \
637 (sds_rings)*(sizeof(struct qlcnic_hostrq_sds_ring)))
638
639#define SIZEOF_CARDRSP_RX(CARDRSP_RX, rds_rings, sds_rings) \
640 (sizeof(CARDRSP_RX) + \
641 (rds_rings)*(sizeof(struct qlcnic_cardrsp_rds_ring)) + \
642 (sds_rings)*(sizeof(struct qlcnic_cardrsp_sds_ring)))
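/*
 * Illustrative sketch only: the request/response buffers sized by the macros
 * above are variable length, with the per-ring descriptors packed after
 * data[0] at the recorded offsets.  For example (variable names hypothetical):
 *
 *	size_t rq_size = SIZEOF_HOSTRQ_RX(struct qlcnic_hostrq_rx_ctx,
 *					  nrds_rings, nsds_rings);
 *	struct qlcnic_hostrq_rds_ring *rds = (struct qlcnic_hostrq_rds_ring *)
 *		(prq->data + le32_to_cpu(prq->rds_ring_offset));
 *
 * qlcnic_fw_cmd_create_rx_ctx() in qlcnic_ctx.c builds the request this way.
 */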
643
644/*
645 * Tx context
646 */
647
648struct qlcnic_hostrq_cds_ring {
649 __le64 host_phys_addr; /* Ring base addr */
650 __le32 ring_size; /* Ring entries */
651 __le32 rsvd; /* Padding */
652};
653
654struct qlcnic_hostrq_tx_ctx {
655 __le64 host_rsp_dma_addr; /* Response dma'd here */
656 __le64 cmd_cons_dma_addr; /* */
657 __le64 dummy_dma_addr; /* */
658 __le32 capabilities[4]; /* Flag bit vector */
659 __le32 host_int_crb_mode; /* Interrupt crb usage */
660 __le32 rsvd1; /* Padding */
661 __le16 rsvd2; /* Padding */
662 __le16 interrupt_ctl;
663 __le16 msi_index;
664 __le16 rsvd3; /* Padding */
665 struct qlcnic_hostrq_cds_ring cds_ring; /* Desc of cds ring */
666 u8 reserved[128]; /* future expansion */
667};
668
669struct qlcnic_cardrsp_cds_ring {
670 __le32 host_producer_crb; /* Crb to use */
671 __le32 interrupt_crb; /* Crb to use */
672};
673
674struct qlcnic_cardrsp_tx_ctx {
675 __le32 host_ctx_state; /* Starting state */
676 __le16 context_id; /* Handle for context */
677 u8 phys_port; /* Physical id of port */
678 u8 virt_port; /* Virtual/Logical id of port */
679 struct qlcnic_cardrsp_cds_ring cds_ring; /* Card cds settings */
680 u8 reserved[128]; /* future expansion */
681};
682
683#define SIZEOF_HOSTRQ_TX(HOSTRQ_TX) (sizeof(HOSTRQ_TX))
684#define SIZEOF_CARDRSP_TX(CARDRSP_TX) (sizeof(CARDRSP_TX))
685
686/* CRB */
687
688#define QLCNIC_HOST_RDS_CRB_MODE_UNIQUE 0
689#define QLCNIC_HOST_RDS_CRB_MODE_SHARED 1
690#define QLCNIC_HOST_RDS_CRB_MODE_CUSTOM 2
691#define QLCNIC_HOST_RDS_CRB_MODE_MAX 3
692
693#define QLCNIC_HOST_INT_CRB_MODE_UNIQUE 0
694#define QLCNIC_HOST_INT_CRB_MODE_SHARED 1
695#define QLCNIC_HOST_INT_CRB_MODE_NORX 2
696#define QLCNIC_HOST_INT_CRB_MODE_NOTX 3
697#define QLCNIC_HOST_INT_CRB_MODE_NORXTX 4
698
699
700/* MAC */
701
702#define MC_COUNT_P3 38
703
704#define QLCNIC_MAC_NOOP 0
705#define QLCNIC_MAC_ADD 1
706#define QLCNIC_MAC_DEL 2
707
708struct qlcnic_mac_list_s {
709 struct list_head list;
710 uint8_t mac_addr[ETH_ALEN+2];
711};
712
713/*
714 * Interrupt coalescing defaults. The defaults are for a 1500-byte MTU
715 * and are adjusted based on the configured MTU.
716 */
717#define QLCNIC_DEFAULT_INTR_COALESCE_RX_TIME_US 3
718#define QLCNIC_DEFAULT_INTR_COALESCE_RX_PACKETS 256
719#define QLCNIC_DEFAULT_INTR_COALESCE_TX_PACKETS 64
720#define QLCNIC_DEFAULT_INTR_COALESCE_TX_TIME_US 4
721
722#define QLCNIC_INTR_DEFAULT 0x04
723
724union qlcnic_nic_intr_coalesce_data {
725 struct {
726 u16 rx_packets;
727 u16 rx_time_us;
728 u16 tx_packets;
729 u16 tx_time_us;
730 } data;
731 u64 word;
732};
733
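/*
 * Illustrative sketch only: the union overlays the four 16-bit coalescing
 * parameters on a single 64-bit word, e.g.
 *
 *	union qlcnic_nic_intr_coalesce_data c = { .data = {
 *		.rx_packets = QLCNIC_DEFAULT_INTR_COALESCE_RX_PACKETS,
 *		.rx_time_us = QLCNIC_DEFAULT_INTR_COALESCE_RX_TIME_US,
 *		.tx_packets = QLCNIC_DEFAULT_INTR_COALESCE_TX_PACKETS,
 *		.tx_time_us = QLCNIC_DEFAULT_INTR_COALESCE_TX_TIME_US,
 *	} };
 *	u64 word = c.word;	(all four parameters packed together)
 */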
734struct qlcnic_nic_intr_coalesce {
735 u16 stats_time_us;
736 u16 rate_sample_time;
737 u16 flags;
738 u16 rsvd_1;
739 u32 low_threshold;
740 u32 high_threshold;
741 union qlcnic_nic_intr_coalesce_data normal;
742 union qlcnic_nic_intr_coalesce_data low;
743 union qlcnic_nic_intr_coalesce_data high;
744 union qlcnic_nic_intr_coalesce_data irq;
745};
746
747#define QLCNIC_HOST_REQUEST 0x13
748#define QLCNIC_REQUEST 0x14
749
750#define QLCNIC_MAC_EVENT 0x1
751
752#define QLCNIC_IP_UP 2
753#define QLCNIC_IP_DOWN 3
754
755/*
756 * Driver --> Firmware
757 */
758#define QLCNIC_H2C_OPCODE_START 0
759#define QLCNIC_H2C_OPCODE_CONFIG_RSS 1
760#define QLCNIC_H2C_OPCODE_CONFIG_RSS_TBL 2
761#define QLCNIC_H2C_OPCODE_CONFIG_INTR_COALESCE 3
762#define QLCNIC_H2C_OPCODE_CONFIG_LED 4
763#define QLCNIC_H2C_OPCODE_CONFIG_PROMISCUOUS 5
764#define QLCNIC_H2C_OPCODE_CONFIG_L2_MAC 6
765#define QLCNIC_H2C_OPCODE_LRO_REQUEST 7
766#define QLCNIC_H2C_OPCODE_GET_SNMP_STATS 8
767#define QLCNIC_H2C_OPCODE_PROXY_START_REQUEST 9
768#define QLCNIC_H2C_OPCODE_PROXY_STOP_REQUEST 10
769#define QLCNIC_H2C_OPCODE_PROXY_SET_MTU 11
770#define QLCNIC_H2C_OPCODE_PROXY_SET_VPORT_MISS_MODE 12
771#define QLCNIC_H2C_OPCODE_GET_FINGER_PRINT_REQUEST 13
772#define QLCNIC_H2C_OPCODE_INSTALL_LICENSE_REQUEST 14
773#define QLCNIC_H2C_OPCODE_GET_LICENSE_CAPABILITY_REQUEST 15
774#define QLCNIC_H2C_OPCODE_GET_NET_STATS 16
775#define QLCNIC_H2C_OPCODE_PROXY_UPDATE_P2V 17
776#define QLCNIC_H2C_OPCODE_CONFIG_IPADDR 18
777#define QLCNIC_H2C_OPCODE_CONFIG_LOOPBACK 19
778#define QLCNIC_H2C_OPCODE_PROXY_STOP_DONE 20
779#define QLCNIC_H2C_OPCODE_GET_LINKEVENT 21
780#define QLCNIC_C2C_OPCODE 22
781#define QLCNIC_H2C_OPCODE_CONFIG_BRIDGING 23
782#define QLCNIC_H2C_OPCODE_CONFIG_HW_LRO 24
783#define QLCNIC_H2C_OPCODE_LAST 25
784/*
785 * Firmware --> Driver
786 */
787
788#define QLCNIC_C2H_OPCODE_START 128
789#define QLCNIC_C2H_OPCODE_CONFIG_RSS_RESPONSE 129
790#define QLCNIC_C2H_OPCODE_CONFIG_RSS_TBL_RESPONSE 130
791#define QLCNIC_C2H_OPCODE_CONFIG_MAC_RESPONSE 131
792#define QLCNIC_C2H_OPCODE_CONFIG_PROMISCUOUS_RESPONSE 132
793#define QLCNIC_C2H_OPCODE_CONFIG_L2_MAC_RESPONSE 133
794#define QLCNIC_C2H_OPCODE_LRO_DELETE_RESPONSE 134
795#define QLCNIC_C2H_OPCODE_LRO_ADD_FAILURE_RESPONSE 135
796#define QLCNIC_C2H_OPCODE_GET_SNMP_STATS 136
797#define QLCNIC_C2H_OPCODE_GET_FINGER_PRINT_REPLY 137
798#define QLCNIC_C2H_OPCODE_INSTALL_LICENSE_REPLY 138
799#define QLCNIC_C2H_OPCODE_GET_LICENSE_CAPABILITIES_REPLY 139
800#define QLCNIC_C2H_OPCODE_GET_NET_STATS_RESPONSE 140
801#define QLCNIC_C2H_OPCODE_GET_LINKEVENT_RESPONSE 141
802#define QLCNIC_C2H_OPCODE_LAST 142
803
804#define VPORT_MISS_MODE_DROP 0 /* drop all unmatched */
805#define VPORT_MISS_MODE_ACCEPT_ALL 1 /* accept all packets */
806#define VPORT_MISS_MODE_ACCEPT_MULTI 2 /* accept unmatched multicast */
807
808#define QLCNIC_LRO_REQUEST_CLEANUP 4
809
810/* Capabilities received */
811#define QLCNIC_FW_CAPABILITY_BDG (1 << 8)
812#define QLCNIC_FW_CAPABILITY_FVLANTX (1 << 9)
813#define QLCNIC_FW_CAPABILITY_HW_LRO (1 << 10)
814
815/* module types */
816#define LINKEVENT_MODULE_NOT_PRESENT 1
817#define LINKEVENT_MODULE_OPTICAL_UNKNOWN 2
818#define LINKEVENT_MODULE_OPTICAL_SRLR 3
819#define LINKEVENT_MODULE_OPTICAL_LRM 4
820#define LINKEVENT_MODULE_OPTICAL_SFP_1G 5
821#define LINKEVENT_MODULE_TWINAX_UNSUPPORTED_CABLE 6
822#define LINKEVENT_MODULE_TWINAX_UNSUPPORTED_CABLELEN 7
823#define LINKEVENT_MODULE_TWINAX 8
824
825#define LINKSPEED_10GBPS 10000
826#define LINKSPEED_1GBPS 1000
827#define LINKSPEED_100MBPS 100
828#define LINKSPEED_10MBPS 10
829
830#define LINKSPEED_ENCODED_10MBPS 0
831#define LINKSPEED_ENCODED_100MBPS 1
832#define LINKSPEED_ENCODED_1GBPS 2
833
834#define LINKEVENT_AUTONEG_DISABLED 0
835#define LINKEVENT_AUTONEG_ENABLED 1
836
837#define LINKEVENT_HALF_DUPLEX 0
838#define LINKEVENT_FULL_DUPLEX 1
839
840#define LINKEVENT_LINKSPEED_MBPS 0
841#define LINKEVENT_LINKSPEED_ENCODED 1
842
843#define AUTO_FW_RESET_ENABLED 0x01
844/* firmware response header:
845 * 63:58 - message type
846 * 57:56 - owner
847 * 55:53 - desc count
848 * 52:48 - reserved
849 * 47:40 - completion id
850 * 39:32 - opcode
851 * 31:16 - error code
852 * 15:00 - reserved
853 */
854#define qlcnic_get_nic_msg_opcode(msg_hdr) \
855 ((msg_hdr >> 32) & 0xFF)
856
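/*
 * Other fields can be pulled out of the 64-bit header the same way, following
 * the bit layout documented above (illustrative only, these helpers are not
 * used by the driver):
 *
 *	#define example_msg_error_code(msg_hdr)	(((msg_hdr) >> 16) & 0xFFFF)
 *	#define example_msg_desc_count(msg_hdr)	(((msg_hdr) >> 53) & 0x7)
 */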
857struct qlcnic_fw_msg {
858 union {
859 struct {
860 u64 hdr;
861 u64 body[7];
862 };
863 u64 words[8];
864 };
865};
866
867struct qlcnic_nic_req {
868 __le64 qhdr;
869 __le64 req_hdr;
870 __le64 words[6];
871};
872
873struct qlcnic_mac_req {
874 u8 op;
875 u8 tag;
876 u8 mac_addr[6];
877};
878
879#define QLCNIC_MSI_ENABLED 0x02
880#define QLCNIC_MSIX_ENABLED 0x04
881#define QLCNIC_LRO_ENABLED 0x08
882#define QLCNIC_BRIDGE_ENABLED 0x10
883#define QLCNIC_DIAG_ENABLED 0x20
884#define QLCNIC_IS_MSI_FAMILY(adapter) \
885 ((adapter)->flags & (QLCNIC_MSI_ENABLED | QLCNIC_MSIX_ENABLED))
886
887#define MSIX_ENTRIES_PER_ADAPTER NUM_STS_DESC_RINGS
888#define QLCNIC_MSIX_TBL_SPACE 8192
889#define QLCNIC_PCI_REG_MSIX_TBL 0x44
890
891#define QLCNIC_NETDEV_WEIGHT 128
892#define QLCNIC_ADAPTER_UP_MAGIC 777
893
894#define __QLCNIC_FW_ATTACHED 0
895#define __QLCNIC_DEV_UP 1
896#define __QLCNIC_RESETTING 2
897#define __QLCNIC_START_FW 4
898
899#define QLCNIC_INTERRUPT_TEST 1
900#define QLCNIC_LOOPBACK_TEST 2
901
902struct qlcnic_adapter {
903 struct qlcnic_hardware_context ahw;
904
905 struct net_device *netdev;
906 struct pci_dev *pdev;
907 struct list_head mac_list;
908
909 spinlock_t tx_clean_lock;
910
911 u16 num_txd;
912 u16 num_rxd;
913 u16 num_jumbo_rxd;
914 u16 num_lro_rxd;
915
916 u8 max_rds_rings;
917 u8 max_sds_rings;
918 u8 driver_mismatch;
919 u8 msix_supported;
920 u8 rx_csum;
921 u8 pci_using_dac;
922 u8 portnum;
923 u8 physical_port;
924
925 u8 mc_enabled;
926 u8 max_mc_count;
927 u8 rss_supported;
928 u8 rsrvd1;
929 u8 fw_wait_cnt;
930 u8 fw_fail_cnt;
931 u8 tx_timeo_cnt;
932 u8 need_fw_reset;
933
934 u8 has_link_events;
935 u8 fw_type;
936 u16 tx_context_id;
937 u16 mtu;
938 u16 is_up;
939
940 u16 link_speed;
941 u16 link_duplex;
942 u16 link_autoneg;
943 u16 module_type;
944
945 u32 capabilities;
946 u32 flags;
947 u32 irq;
948 u32 temp;
949
950 u32 int_vec_bit;
951 u32 heartbit;
952
953 u8 dev_state;
954 u8 diag_test;
955 u8 diag_cnt;
956 u8 rsrd1;
957 u16 rsrd2;
958
959 u8 mac_addr[ETH_ALEN];
960
961 struct qlcnic_adapter_stats stats;
962
963 struct qlcnic_recv_context recv_ctx;
964 struct qlcnic_host_tx_ring *tx_ring;
965
966 void __iomem *tgt_mask_reg;
967 void __iomem *tgt_status_reg;
968 void __iomem *crb_int_state_reg;
969 void __iomem *isr_int_vec;
970
971 struct msix_entry msix_entries[MSIX_ENTRIES_PER_ADAPTER];
972
973 struct delayed_work fw_work;
974
975 struct work_struct tx_timeout_task;
976
977 struct qlcnic_nic_intr_coalesce coal;
978
979 unsigned long state;
980 __le32 file_prd_off; /*File fw product offset*/
981 u32 fw_version;
982 const struct firmware *fw;
983};
984
985int qlcnic_fw_cmd_query_phy(struct qlcnic_adapter *adapter, u32 reg, u32 *val);
986int qlcnic_fw_cmd_set_phy(struct qlcnic_adapter *adapter, u32 reg, u32 val);
987
988u32 qlcnic_hw_read_wx_2M(struct qlcnic_adapter *adapter, ulong off);
989int qlcnic_hw_write_wx_2M(struct qlcnic_adapter *, ulong off, u32 data);
990int qlcnic_pci_mem_write_2M(struct qlcnic_adapter *, u64 off, u64 data);
991int qlcnic_pci_mem_read_2M(struct qlcnic_adapter *, u64 off, u64 *data);
992
993#define QLCRD32(adapter, off) \
994 (qlcnic_hw_read_wx_2M(adapter, off))
995#define QLCWR32(adapter, off, val) \
996 (qlcnic_hw_write_wx_2M(adapter, off, val))
997
998int qlcnic_pcie_sem_lock(struct qlcnic_adapter *, int, u32);
999void qlcnic_pcie_sem_unlock(struct qlcnic_adapter *, int);
1000
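/*
 * Named wrappers around the hardware PCIe semaphores: each macro maps one
 * resource (ROM, PHY, API, software, CRB window) to a fixed semaphore index
 * and, where applicable, a lock-owner id.
 */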
1001#define qlcnic_rom_lock(a) \
1002 qlcnic_pcie_sem_lock((a), 2, QLCNIC_ROM_LOCK_ID)
1003#define qlcnic_rom_unlock(a) \
1004 qlcnic_pcie_sem_unlock((a), 2)
1005#define qlcnic_phy_lock(a) \
1006 qlcnic_pcie_sem_lock((a), 3, QLCNIC_PHY_LOCK_ID)
1007#define qlcnic_phy_unlock(a) \
1008 qlcnic_pcie_sem_unlock((a), 3)
1009#define qlcnic_api_lock(a) \
1010 qlcnic_pcie_sem_lock((a), 5, 0)
1011#define qlcnic_api_unlock(a) \
1012 qlcnic_pcie_sem_unlock((a), 5)
1013#define qlcnic_sw_lock(a) \
1014 qlcnic_pcie_sem_lock((a), 6, 0)
1015#define qlcnic_sw_unlock(a) \
1016 qlcnic_pcie_sem_unlock((a), 6)
1017#define crb_win_lock(a) \
1018 qlcnic_pcie_sem_lock((a), 7, QLCNIC_CRB_WIN_LOCK_ID)
1019#define crb_win_unlock(a) \
1020 qlcnic_pcie_sem_unlock((a), 7)
1021
1022int qlcnic_get_board_info(struct qlcnic_adapter *adapter);
1023int qlcnic_wol_supported(struct qlcnic_adapter *adapter);
1024int qlcnic_config_led(struct qlcnic_adapter *adapter, u32 state, u32 rate);
1025
1026/* Functions from qlcnic_init.c */
1027int qlcnic_phantom_init(struct qlcnic_adapter *adapter);
1028int qlcnic_load_firmware(struct qlcnic_adapter *adapter);
1029int qlcnic_need_fw_reset(struct qlcnic_adapter *adapter);
1030void qlcnic_request_firmware(struct qlcnic_adapter *adapter);
1031void qlcnic_release_firmware(struct qlcnic_adapter *adapter);
1032int qlcnic_pinit_from_rom(struct qlcnic_adapter *adapter);
1033
1034int qlcnic_rom_fast_read(struct qlcnic_adapter *adapter, int addr, int *valp);
1035int qlcnic_rom_fast_read_words(struct qlcnic_adapter *adapter, int addr,
1036 u8 *bytes, size_t size);
1037int qlcnic_alloc_sw_resources(struct qlcnic_adapter *adapter);
1038void qlcnic_free_sw_resources(struct qlcnic_adapter *adapter);
1039
1040void __iomem *qlcnic_get_ioaddr(struct qlcnic_adapter *, u32);
1041
1042int qlcnic_alloc_hw_resources(struct qlcnic_adapter *adapter);
1043void qlcnic_free_hw_resources(struct qlcnic_adapter *adapter);
1044
1045void qlcnic_release_rx_buffers(struct qlcnic_adapter *adapter);
1046void qlcnic_release_tx_buffers(struct qlcnic_adapter *adapter);
1047
1048int qlcnic_init_firmware(struct qlcnic_adapter *adapter);
1049void qlcnic_watchdog_task(struct work_struct *work);
1050void qlcnic_post_rx_buffers(struct qlcnic_adapter *adapter, u32 ringid,
1051 struct qlcnic_host_rds_ring *rds_ring);
1052int qlcnic_process_rcv_ring(struct qlcnic_host_sds_ring *sds_ring, int max);
1053void qlcnic_set_multi(struct net_device *netdev);
1054void qlcnic_free_mac_list(struct qlcnic_adapter *adapter);
1055int qlcnic_nic_set_promisc(struct qlcnic_adapter *adapter, u32);
1056int qlcnic_config_intr_coalesce(struct qlcnic_adapter *adapter);
1057int qlcnic_config_rss(struct qlcnic_adapter *adapter, int enable);
1058int qlcnic_config_ipaddr(struct qlcnic_adapter *adapter, u32 ip, int cmd);
1059int qlcnic_linkevent_request(struct qlcnic_adapter *adapter, int enable);
1060void qlcnic_advert_link_change(struct qlcnic_adapter *adapter, int linkup);
1061
1062int qlcnic_fw_cmd_set_mtu(struct qlcnic_adapter *adapter, int mtu);
1063int qlcnic_change_mtu(struct net_device *netdev, int new_mtu);
1064int qlcnic_config_hw_lro(struct qlcnic_adapter *adapter, int enable);
1065int qlcnic_config_bridged_mode(struct qlcnic_adapter *adapter, int enable);
1066int qlcnic_send_lro_cleanup(struct qlcnic_adapter *adapter);
1067void qlcnic_update_cmd_producer(struct qlcnic_adapter *adapter,
1068 struct qlcnic_host_tx_ring *tx_ring);
1069int qlcnic_get_mac_addr(struct qlcnic_adapter *adapter, u64 *mac);
1070void qlcnic_clear_ilb_mode(struct qlcnic_adapter *adapter);
1071int qlcnic_set_ilb_mode(struct qlcnic_adapter *adapter);
1072
1073/* Functions from qlcnic_main.c */
1074int qlcnic_reset_context(struct qlcnic_adapter *);
1075u32 qlcnic_issue_cmd(struct qlcnic_adapter *adapter,
1076 u32 pci_fn, u32 version, u32 arg1, u32 arg2, u32 arg3, u32 cmd);
1077void qlcnic_diag_free_res(struct net_device *netdev, int max_sds_rings);
1078int qlcnic_diag_alloc_res(struct net_device *netdev, int test);
1079int qlcnic_check_loopback_buff(unsigned char *data);
1080netdev_tx_t qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev);
1081void qlcnic_process_rcv_ring_diag(struct qlcnic_host_sds_ring *sds_ring);
1082
1083/*
1084 * QLOGIC Board information
1085 */
1086
1087#define QLCNIC_MAX_BOARD_NAME_LEN 100
1088struct qlcnic_brdinfo {
1089 unsigned short vendor;
1090 unsigned short device;
1091 unsigned short sub_vendor;
1092 unsigned short sub_device;
1093 char short_name[QLCNIC_MAX_BOARD_NAME_LEN];
1094};
1095
1096static const struct qlcnic_brdinfo qlcnic_boards[] = {
1097 {0x1077, 0x8020, 0x1077, 0x203,
1098 "8200 Series Single Port 10GbE Converged Network Adapter \
1099 (TCP/IP Networking)"},
1100 {0x1077, 0x8020, 0x1077, 0x207,
1101 "8200 Series Dual Port 10GbE Converged Network Adapter \
1102 (TCP/IP Networking)"},
1103 {0x1077, 0x8020, 0x1077, 0x20b,
1104 "3200 Series Dual Port 10Gb Intelligent Ethernet Adapter"},
1105 {0x1077, 0x8020, 0x1077, 0x20c,
1106 "3200 Series Quad Port 1Gb Intelligent Ethernet Adapter"},
1107 {0x1077, 0x8020, 0x1077, 0x20f,
1108 "3200 Series Single Port 10Gb Intelligent Ethernet Adapter"},
1109 {0x1077, 0x8020, 0x0, 0x0, "cLOM8214 1/10GbE Controller"},
1110};
1111
1112#define NUM_SUPPORTED_BOARDS ARRAY_SIZE(qlcnic_boards)
1113
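/*
 * Number of free descriptors left in the tx ring.  The smp_mb() ensures the
 * producer and software-consumer indices are read after any preceding ring
 * updates; the else branch handles the case where the producer index has
 * wrapped past the consumer index.
 */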
1114static inline u32 qlcnic_tx_avail(struct qlcnic_host_tx_ring *tx_ring)
1115{
1116 smp_mb();
1117 if (tx_ring->producer < tx_ring->sw_consumer)
1118 return tx_ring->sw_consumer - tx_ring->producer;
1119 else
1120 return tx_ring->sw_consumer + tx_ring->num_desc -
1121 tx_ring->producer;
1122}
1123
1124extern const struct ethtool_ops qlcnic_ethtool_ops;
1125
1126#endif /* __QLCNIC_H_ */
diff --git a/drivers/net/qlcnic/qlcnic_ctx.c b/drivers/net/qlcnic/qlcnic_ctx.c
new file mode 100644
index 00000000000..0a6a39914ae
--- /dev/null
+++ b/drivers/net/qlcnic/qlcnic_ctx.c
@@ -0,0 +1,534 @@
1/*
2 * Copyright (C) 2009 - QLogic Corporation.
3 * All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License
7 * as published by the Free Software Foundation; either version 2
8 * of the License, or (at your option) any later version.
9 *
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 59 Temple Place - Suite 330, Boston,
18 * MA 02111-1307, USA.
19 *
20 * The full GNU General Public License is included in this distribution
21 * in the file called "COPYING".
22 *
23 */
24
25#include "qlcnic.h"
26
27static u32
28qlcnic_poll_rsp(struct qlcnic_adapter *adapter)
29{
30 u32 rsp;
31 int timeout = 0;
32
33 do {
34 /* give the firmware at least 1 ms to respond */
35 msleep(1);
36
37 if (++timeout > QLCNIC_OS_CRB_RETRY_COUNT)
38 return QLCNIC_CDRP_RSP_TIMEOUT;
39
40 rsp = QLCRD32(adapter, QLCNIC_CDRP_CRB_OFFSET);
41 } while (!QLCNIC_CDRP_IS_RSP(rsp));
42
43 return rsp;
44}
45
46u32
47qlcnic_issue_cmd(struct qlcnic_adapter *adapter,
48 u32 pci_fn, u32 version, u32 arg1, u32 arg2, u32 arg3, u32 cmd)
49{
50 u32 rsp;
51 u32 signature;
52 u32 rcode = QLCNIC_RCODE_SUCCESS;
53 struct pci_dev *pdev = adapter->pdev;
54
55 signature = QLCNIC_CDRP_SIGNATURE_MAKE(pci_fn, version);
56
57 /* Acquire semaphore before accessing CRB */
58 if (qlcnic_api_lock(adapter))
59 return QLCNIC_RCODE_TIMEOUT;
60
61 QLCWR32(adapter, QLCNIC_SIGN_CRB_OFFSET, signature);
62 QLCWR32(adapter, QLCNIC_ARG1_CRB_OFFSET, arg1);
63 QLCWR32(adapter, QLCNIC_ARG2_CRB_OFFSET, arg2);
64 QLCWR32(adapter, QLCNIC_ARG3_CRB_OFFSET, arg3);
65 QLCWR32(adapter, QLCNIC_CDRP_CRB_OFFSET, QLCNIC_CDRP_FORM_CMD(cmd));
66
67 rsp = qlcnic_poll_rsp(adapter);
68
69 if (rsp == QLCNIC_CDRP_RSP_TIMEOUT) {
70 dev_err(&pdev->dev, "card response timeout.\n");
71 rcode = QLCNIC_RCODE_TIMEOUT;
72 } else if (rsp == QLCNIC_CDRP_RSP_FAIL) {
73 rcode = QLCRD32(adapter, QLCNIC_ARG1_CRB_OFFSET);
74 dev_err(&pdev->dev, "failed card response code:0x%x\n",
75 rcode);
76 }
77
78 /* Release semaphore */
79 qlcnic_api_unlock(adapter);
80
81 return rcode;
82}
83
84int
85qlcnic_fw_cmd_set_mtu(struct qlcnic_adapter *adapter, int mtu)
86{
87 struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx;
88
89 if (recv_ctx->state == QLCNIC_HOST_CTX_STATE_ACTIVE) {
90 if (qlcnic_issue_cmd(adapter,
91 adapter->ahw.pci_func,
92 QLCHAL_VERSION,
93 recv_ctx->context_id,
94 mtu,
95 0,
96 QLCNIC_CDRP_CMD_SET_MTU)) {
97
98 dev_err(&adapter->pdev->dev, "Failed to set mtu\n");
99 return -EIO;
100 }
101 }
102
103 return 0;
104}
105
106static int
107qlcnic_fw_cmd_create_rx_ctx(struct qlcnic_adapter *adapter)
108{
109 void *addr;
110 struct qlcnic_hostrq_rx_ctx *prq;
111 struct qlcnic_cardrsp_rx_ctx *prsp;
112 struct qlcnic_hostrq_rds_ring *prq_rds;
113 struct qlcnic_hostrq_sds_ring *prq_sds;
114 struct qlcnic_cardrsp_rds_ring *prsp_rds;
115 struct qlcnic_cardrsp_sds_ring *prsp_sds;
116 struct qlcnic_host_rds_ring *rds_ring;
117 struct qlcnic_host_sds_ring *sds_ring;
118
119 dma_addr_t hostrq_phys_addr, cardrsp_phys_addr;
120 u64 phys_addr;
121
122 int i, nrds_rings, nsds_rings;
123 size_t rq_size, rsp_size;
124 u32 cap, reg, val;
125 int err;
126
127 struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx;
128
129 nrds_rings = adapter->max_rds_rings;
130 nsds_rings = adapter->max_sds_rings;
131
132 rq_size =
133 SIZEOF_HOSTRQ_RX(struct qlcnic_hostrq_rx_ctx, nrds_rings,
134 nsds_rings);
135 rsp_size =
136 SIZEOF_CARDRSP_RX(struct qlcnic_cardrsp_rx_ctx, nrds_rings,
137 nsds_rings);
138
139 addr = pci_alloc_consistent(adapter->pdev,
140 rq_size, &hostrq_phys_addr);
141 if (addr == NULL)
142 return -ENOMEM;
143 prq = (struct qlcnic_hostrq_rx_ctx *)addr;
144
145 addr = pci_alloc_consistent(adapter->pdev,
146 rsp_size, &cardrsp_phys_addr);
147 if (addr == NULL) {
148 err = -ENOMEM;
149 goto out_free_rq;
150 }
151 prsp = (struct qlcnic_cardrsp_rx_ctx *)addr;
152
153 prq->host_rsp_dma_addr = cpu_to_le64(cardrsp_phys_addr);
154
155 cap = (QLCNIC_CAP0_LEGACY_CONTEXT | QLCNIC_CAP0_LEGACY_MN);
156 cap |= (QLCNIC_CAP0_JUMBO_CONTIGUOUS | QLCNIC_CAP0_LRO_CONTIGUOUS);
157
158 prq->capabilities[0] = cpu_to_le32(cap);
159 prq->host_int_crb_mode =
160 cpu_to_le32(QLCNIC_HOST_INT_CRB_MODE_SHARED);
161 prq->host_rds_crb_mode =
162 cpu_to_le32(QLCNIC_HOST_RDS_CRB_MODE_UNIQUE);
163
164 prq->num_rds_rings = cpu_to_le16(nrds_rings);
165 prq->num_sds_rings = cpu_to_le16(nsds_rings);
166 prq->rds_ring_offset = cpu_to_le32(0);
167
168 val = le32_to_cpu(prq->rds_ring_offset) +
169 (sizeof(struct qlcnic_hostrq_rds_ring) * nrds_rings);
170 prq->sds_ring_offset = cpu_to_le32(val);
171
172 prq_rds = (struct qlcnic_hostrq_rds_ring *)(prq->data +
173 le32_to_cpu(prq->rds_ring_offset));
174
175 for (i = 0; i < nrds_rings; i++) {
176
177 rds_ring = &recv_ctx->rds_rings[i];
178
179 prq_rds[i].host_phys_addr = cpu_to_le64(rds_ring->phys_addr);
180 prq_rds[i].ring_size = cpu_to_le32(rds_ring->num_desc);
181 prq_rds[i].ring_kind = cpu_to_le32(i);
182 prq_rds[i].buff_size = cpu_to_le64(rds_ring->dma_size);
183 }
184
185 prq_sds = (struct qlcnic_hostrq_sds_ring *)(prq->data +
186 le32_to_cpu(prq->sds_ring_offset));
187
188 for (i = 0; i < nsds_rings; i++) {
189
190 sds_ring = &recv_ctx->sds_rings[i];
191
192 prq_sds[i].host_phys_addr = cpu_to_le64(sds_ring->phys_addr);
193 prq_sds[i].ring_size = cpu_to_le32(sds_ring->num_desc);
194 prq_sds[i].msi_index = cpu_to_le16(i);
195 }
196
197 phys_addr = hostrq_phys_addr;
198 err = qlcnic_issue_cmd(adapter,
199 adapter->ahw.pci_func,
200 QLCHAL_VERSION,
201 (u32)(phys_addr >> 32),
202 (u32)(phys_addr & 0xffffffff),
203 rq_size,
204 QLCNIC_CDRP_CMD_CREATE_RX_CTX);
205 if (err) {
206 dev_err(&adapter->pdev->dev,
207 "Failed to create rx ctx in firmware %d\n", err);
208 goto out_free_rsp;
209 }
210
211
212 prsp_rds = ((struct qlcnic_cardrsp_rds_ring *)
213 &prsp->data[le32_to_cpu(prsp->rds_ring_offset)]);
214
215 for (i = 0; i < le16_to_cpu(prsp->num_rds_rings); i++) {
216 rds_ring = &recv_ctx->rds_rings[i];
217
218 reg = le32_to_cpu(prsp_rds[i].host_producer_crb);
219 rds_ring->crb_rcv_producer = qlcnic_get_ioaddr(adapter,
220 QLCNIC_REG(reg - 0x200));
221 }
222
223 prsp_sds = ((struct qlcnic_cardrsp_sds_ring *)
224 &prsp->data[le32_to_cpu(prsp->sds_ring_offset)]);
225
226 for (i = 0; i < le16_to_cpu(prsp->num_sds_rings); i++) {
227 sds_ring = &recv_ctx->sds_rings[i];
228
229 reg = le32_to_cpu(prsp_sds[i].host_consumer_crb);
230 sds_ring->crb_sts_consumer = qlcnic_get_ioaddr(adapter,
231 QLCNIC_REG(reg - 0x200));
232
233 reg = le32_to_cpu(prsp_sds[i].interrupt_crb);
234 sds_ring->crb_intr_mask = qlcnic_get_ioaddr(adapter,
235 QLCNIC_REG(reg - 0x200));
236 }
237
238 recv_ctx->state = le32_to_cpu(prsp->host_ctx_state);
239 recv_ctx->context_id = le16_to_cpu(prsp->context_id);
240 recv_ctx->virt_port = prsp->virt_port;
241
242out_free_rsp:
243 pci_free_consistent(adapter->pdev, rsp_size, prsp, cardrsp_phys_addr);
244out_free_rq:
245 pci_free_consistent(adapter->pdev, rq_size, prq, hostrq_phys_addr);
246 return err;
247}
248
249static void
250qlcnic_fw_cmd_destroy_rx_ctx(struct qlcnic_adapter *adapter)
251{
252 struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx;
253
254 if (qlcnic_issue_cmd(adapter,
255 adapter->ahw.pci_func,
256 QLCHAL_VERSION,
257 recv_ctx->context_id,
258 QLCNIC_DESTROY_CTX_RESET,
259 0,
260 QLCNIC_CDRP_CMD_DESTROY_RX_CTX)) {
261
262 dev_err(&adapter->pdev->dev,
263 "Failed to destroy rx ctx in firmware\n");
264 }
265}
266
267static int
268qlcnic_fw_cmd_create_tx_ctx(struct qlcnic_adapter *adapter)
269{
270 struct qlcnic_hostrq_tx_ctx *prq;
271 struct qlcnic_hostrq_cds_ring *prq_cds;
272 struct qlcnic_cardrsp_tx_ctx *prsp;
273 void *rq_addr, *rsp_addr;
274 size_t rq_size, rsp_size;
275 u32 temp;
276 int err;
277 u64 phys_addr;
278 dma_addr_t rq_phys_addr, rsp_phys_addr;
279 struct qlcnic_host_tx_ring *tx_ring = adapter->tx_ring;
280
281 rq_size = SIZEOF_HOSTRQ_TX(struct qlcnic_hostrq_tx_ctx);
282 rq_addr = pci_alloc_consistent(adapter->pdev,
283 rq_size, &rq_phys_addr);
284 if (!rq_addr)
285 return -ENOMEM;
286
287 rsp_size = SIZEOF_CARDRSP_TX(struct qlcnic_cardrsp_tx_ctx);
288 rsp_addr = pci_alloc_consistent(adapter->pdev,
289 rsp_size, &rsp_phys_addr);
290 if (!rsp_addr) {
291 err = -ENOMEM;
292 goto out_free_rq;
293 }
294
295 memset(rq_addr, 0, rq_size);
296 prq = (struct qlcnic_hostrq_tx_ctx *)rq_addr;
297
298 memset(rsp_addr, 0, rsp_size);
299 prsp = (struct qlcnic_cardrsp_tx_ctx *)rsp_addr;
300
301 prq->host_rsp_dma_addr = cpu_to_le64(rsp_phys_addr);
302
303 temp = (QLCNIC_CAP0_LEGACY_CONTEXT | QLCNIC_CAP0_LEGACY_MN |
304 QLCNIC_CAP0_LSO);
305 prq->capabilities[0] = cpu_to_le32(temp);
306
307 prq->host_int_crb_mode =
308 cpu_to_le32(QLCNIC_HOST_INT_CRB_MODE_SHARED);
309
310 prq->interrupt_ctl = 0;
311 prq->msi_index = 0;
312 prq->cmd_cons_dma_addr = cpu_to_le64(tx_ring->hw_cons_phys_addr);
313
314 prq_cds = &prq->cds_ring;
315
316 prq_cds->host_phys_addr = cpu_to_le64(tx_ring->phys_addr);
317 prq_cds->ring_size = cpu_to_le32(tx_ring->num_desc);
318
319 phys_addr = rq_phys_addr;
320 err = qlcnic_issue_cmd(adapter,
321 adapter->ahw.pci_func,
322 QLCHAL_VERSION,
323 (u32)(phys_addr >> 32),
324 ((u32)phys_addr & 0xffffffff),
325 rq_size,
326 QLCNIC_CDRP_CMD_CREATE_TX_CTX);
327
328 if (err == QLCNIC_RCODE_SUCCESS) {
329 temp = le32_to_cpu(prsp->cds_ring.host_producer_crb);
330 tx_ring->crb_cmd_producer = qlcnic_get_ioaddr(adapter,
331 QLCNIC_REG(temp - 0x200));
332
333 adapter->tx_context_id =
334 le16_to_cpu(prsp->context_id);
335 } else {
336 dev_err(&adapter->pdev->dev,
337 "Failed to create tx ctx in firmware %d\n", err);
338 err = -EIO;
339 }
340
341 pci_free_consistent(adapter->pdev, rsp_size, rsp_addr, rsp_phys_addr);
342
343out_free_rq:
344 pci_free_consistent(adapter->pdev, rq_size, rq_addr, rq_phys_addr);
345
346 return err;
347}
348
349static void
350qlcnic_fw_cmd_destroy_tx_ctx(struct qlcnic_adapter *adapter)
351{
352 if (qlcnic_issue_cmd(adapter,
353 adapter->ahw.pci_func,
354 QLCHAL_VERSION,
355 adapter->tx_context_id,
356 QLCNIC_DESTROY_CTX_RESET,
357 0,
358 QLCNIC_CDRP_CMD_DESTROY_TX_CTX)) {
359
360 dev_err(&adapter->pdev->dev,
361 "Failed to destroy tx ctx in firmware\n");
362 }
363}
364
365int
366qlcnic_fw_cmd_query_phy(struct qlcnic_adapter *adapter, u32 reg, u32 *val)
367{
368
369 if (qlcnic_issue_cmd(adapter,
370 adapter->ahw.pci_func,
371 QLCHAL_VERSION,
372 reg,
373 0,
374 0,
375 QLCNIC_CDRP_CMD_READ_PHY)) {
376
377 return -EIO;
378 }
379
380 return QLCRD32(adapter, QLCNIC_ARG1_CRB_OFFSET);
381}
382
383int
384qlcnic_fw_cmd_set_phy(struct qlcnic_adapter *adapter, u32 reg, u32 val)
385{
386 return qlcnic_issue_cmd(adapter,
387 adapter->ahw.pci_func,
388 QLCHAL_VERSION,
389 reg,
390 val,
391 0,
392 QLCNIC_CDRP_CMD_WRITE_PHY);
393}
394
395int qlcnic_alloc_hw_resources(struct qlcnic_adapter *adapter)
396{
397 void *addr;
398 int err;
399 int ring;
400 struct qlcnic_recv_context *recv_ctx;
401 struct qlcnic_host_rds_ring *rds_ring;
402 struct qlcnic_host_sds_ring *sds_ring;
403 struct qlcnic_host_tx_ring *tx_ring;
404
405 struct pci_dev *pdev = adapter->pdev;
406
407 recv_ctx = &adapter->recv_ctx;
408 tx_ring = adapter->tx_ring;
409
410 tx_ring->hw_consumer = (__le32 *)pci_alloc_consistent(pdev, sizeof(u32),
411 &tx_ring->hw_cons_phys_addr);
412 if (tx_ring->hw_consumer == NULL) {
413 dev_err(&pdev->dev, "failed to allocate tx consumer\n");
414 return -ENOMEM;
415 }
416 *(tx_ring->hw_consumer) = 0;
417
418 /* cmd desc ring */
419 addr = pci_alloc_consistent(pdev, TX_DESC_RINGSIZE(tx_ring),
420 &tx_ring->phys_addr);
421
422 if (addr == NULL) {
423 dev_err(&pdev->dev, "failed to allocate tx desc ring\n");
424 return -ENOMEM;
425 }
426
427 tx_ring->desc_head = (struct cmd_desc_type0 *)addr;
428
429 for (ring = 0; ring < adapter->max_rds_rings; ring++) {
430 rds_ring = &recv_ctx->rds_rings[ring];
431 addr = pci_alloc_consistent(adapter->pdev,
432 RCV_DESC_RINGSIZE(rds_ring),
433 &rds_ring->phys_addr);
434 if (addr == NULL) {
435 dev_err(&pdev->dev,
436 "failed to allocate rds ring [%d]\n", ring);
437 err = -ENOMEM;
438 goto err_out_free;
439 }
440 rds_ring->desc_head = (struct rcv_desc *)addr;
441
442 }
443
444 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
445 sds_ring = &recv_ctx->sds_rings[ring];
446
447 addr = pci_alloc_consistent(adapter->pdev,
448 STATUS_DESC_RINGSIZE(sds_ring),
449 &sds_ring->phys_addr);
450 if (addr == NULL) {
451 dev_err(&pdev->dev,
452 "failed to allocate sds ring [%d]\n", ring);
453 err = -ENOMEM;
454 goto err_out_free;
455 }
456 sds_ring->desc_head = (struct status_desc *)addr;
457 }
458
459
460 err = qlcnic_fw_cmd_create_rx_ctx(adapter);
461 if (err)
462 goto err_out_free;
463 err = qlcnic_fw_cmd_create_tx_ctx(adapter);
464 if (err)
465 goto err_out_free;
466
467 set_bit(__QLCNIC_FW_ATTACHED, &adapter->state);
468 return 0;
469
470err_out_free:
471 qlcnic_free_hw_resources(adapter);
472 return err;
473}
474
475void qlcnic_free_hw_resources(struct qlcnic_adapter *adapter)
476{
477 struct qlcnic_recv_context *recv_ctx;
478 struct qlcnic_host_rds_ring *rds_ring;
479 struct qlcnic_host_sds_ring *sds_ring;
480 struct qlcnic_host_tx_ring *tx_ring;
481 int ring;
482
483
484 if (test_and_clear_bit(__QLCNIC_FW_ATTACHED, &adapter->state)) {
485 qlcnic_fw_cmd_destroy_rx_ctx(adapter);
486 qlcnic_fw_cmd_destroy_tx_ctx(adapter);
487
488 /* Allow dma queues to drain after context reset */
489 msleep(20);
490 }
491
492 recv_ctx = &adapter->recv_ctx;
493
494 tx_ring = adapter->tx_ring;
495 if (tx_ring->hw_consumer != NULL) {
496 pci_free_consistent(adapter->pdev,
497 sizeof(u32),
498 tx_ring->hw_consumer,
499 tx_ring->hw_cons_phys_addr);
500 tx_ring->hw_consumer = NULL;
501 }
502
503 if (tx_ring->desc_head != NULL) {
504 pci_free_consistent(adapter->pdev,
505 TX_DESC_RINGSIZE(tx_ring),
506 tx_ring->desc_head, tx_ring->phys_addr);
507 tx_ring->desc_head = NULL;
508 }
509
510 for (ring = 0; ring < adapter->max_rds_rings; ring++) {
511 rds_ring = &recv_ctx->rds_rings[ring];
512
513 if (rds_ring->desc_head != NULL) {
514 pci_free_consistent(adapter->pdev,
515 RCV_DESC_RINGSIZE(rds_ring),
516 rds_ring->desc_head,
517 rds_ring->phys_addr);
518 rds_ring->desc_head = NULL;
519 }
520 }
521
522 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
523 sds_ring = &recv_ctx->sds_rings[ring];
524
525 if (sds_ring->desc_head != NULL) {
526 pci_free_consistent(adapter->pdev,
527 STATUS_DESC_RINGSIZE(sds_ring),
528 sds_ring->desc_head,
529 sds_ring->phys_addr);
530 sds_ring->desc_head = NULL;
531 }
532 }
533}
534
diff --git a/drivers/net/qlcnic/qlcnic_ethtool.c b/drivers/net/qlcnic/qlcnic_ethtool.c
new file mode 100644
index 00000000000..8da6ec8c13b
--- /dev/null
+++ b/drivers/net/qlcnic/qlcnic_ethtool.c
@@ -0,0 +1,1015 @@
1/*
2 * Copyright (C) 2009 - QLogic Corporation.
3 * All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License
7 * as published by the Free Software Foundation; either version 2
8 * of the License, or (at your option) any later version.
9 *
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 59 Temple Place - Suite 330, Boston,
18 * MA 02111-1307, USA.
19 *
20 * The full GNU General Public License is included in this distribution
21 * in the file called "COPYING".
22 *
23 */
24
25#include <linux/types.h>
26#include <linux/delay.h>
27#include <linux/pci.h>
28#include <linux/io.h>
29#include <linux/netdevice.h>
30#include <linux/ethtool.h>
31
32#include "qlcnic.h"
33
34struct qlcnic_stats {
35 char stat_string[ETH_GSTRING_LEN];
36 int sizeof_stat;
37 int stat_offset;
38};
39
40#define QLC_SIZEOF(m) FIELD_SIZEOF(struct qlcnic_adapter, m)
41#define QLC_OFF(m) offsetof(struct qlcnic_adapter, m)
42
43static const struct qlcnic_stats qlcnic_gstrings_stats[] = {
44 {"xmit_called",
45 QLC_SIZEOF(stats.xmitcalled), QLC_OFF(stats.xmitcalled)},
46 {"xmit_finished",
47 QLC_SIZEOF(stats.xmitfinished), QLC_OFF(stats.xmitfinished)},
48 {"rx_dropped",
49 QLC_SIZEOF(stats.rxdropped), QLC_OFF(stats.rxdropped)},
50 {"tx_dropped",
51 QLC_SIZEOF(stats.txdropped), QLC_OFF(stats.txdropped)},
52 {"csummed",
53 QLC_SIZEOF(stats.csummed), QLC_OFF(stats.csummed)},
54 {"rx_pkts",
55 QLC_SIZEOF(stats.rx_pkts), QLC_OFF(stats.rx_pkts)},
56 {"lro_pkts",
57 QLC_SIZEOF(stats.lro_pkts), QLC_OFF(stats.lro_pkts)},
58 {"rx_bytes",
59 QLC_SIZEOF(stats.rxbytes), QLC_OFF(stats.rxbytes)},
60 {"tx_bytes",
61 QLC_SIZEOF(stats.txbytes), QLC_OFF(stats.txbytes)},
62};
63
64#define QLCNIC_STATS_LEN ARRAY_SIZE(qlcnic_gstrings_stats)
65
66static const char qlcnic_gstrings_test[][ETH_GSTRING_LEN] = {
67 "Register_Test_on_offline",
68 "Link_Test_on_offline",
69 "Interrupt_Test_offline",
70 "Loopback_Test_offline"
71};
72
73#define QLCNIC_TEST_LEN ARRAY_SIZE(qlcnic_gstrings_test)
74
75#define QLCNIC_RING_REGS_COUNT 20
76#define QLCNIC_RING_REGS_LEN (QLCNIC_RING_REGS_COUNT * sizeof(u32))
77#define QLCNIC_MAX_EEPROM_LEN 1024
78
79static const u32 diag_registers[] = {
80 CRB_CMDPEG_STATE,
81 CRB_RCVPEG_STATE,
82 CRB_XG_STATE_P3,
83 CRB_FW_CAPABILITIES_1,
84 ISR_INT_STATE_REG,
85 QLCNIC_CRB_DEV_REF_COUNT,
86 QLCNIC_CRB_DEV_STATE,
87 QLCNIC_CRB_DRV_STATE,
88 QLCNIC_CRB_DRV_SCRATCH,
89 QLCNIC_CRB_DEV_PARTITION_INFO,
90 QLCNIC_CRB_DRV_IDC_VER,
91 QLCNIC_PEG_ALIVE_COUNTER,
92 QLCNIC_PEG_HALT_STATUS1,
93 QLCNIC_PEG_HALT_STATUS2,
94 QLCNIC_CRB_PEG_NET_0+0x3c,
95 QLCNIC_CRB_PEG_NET_1+0x3c,
96 QLCNIC_CRB_PEG_NET_2+0x3c,
97 QLCNIC_CRB_PEG_NET_4+0x3c,
98 -1
99};
100
101static int qlcnic_get_regs_len(struct net_device *dev)
102{
103 return sizeof(diag_registers) + QLCNIC_RING_REGS_LEN;
104}
105
106static int qlcnic_get_eeprom_len(struct net_device *dev)
107{
108 return QLCNIC_FLASH_TOTAL_SIZE;
109}
110
111static void
112qlcnic_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *drvinfo)
113{
114 struct qlcnic_adapter *adapter = netdev_priv(dev);
115 u32 fw_major, fw_minor, fw_build;
116
117 fw_major = QLCRD32(adapter, QLCNIC_FW_VERSION_MAJOR);
118 fw_minor = QLCRD32(adapter, QLCNIC_FW_VERSION_MINOR);
119 fw_build = QLCRD32(adapter, QLCNIC_FW_VERSION_SUB);
120 sprintf(drvinfo->fw_version, "%d.%d.%d", fw_major, fw_minor, fw_build);
121
122 strlcpy(drvinfo->bus_info, pci_name(adapter->pdev), 32);
123 strlcpy(drvinfo->driver, qlcnic_driver_name, 32);
124 strlcpy(drvinfo->version, QLCNIC_LINUX_VERSIONID, 32);
125}
126
127static int
128qlcnic_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
129{
130 struct qlcnic_adapter *adapter = netdev_priv(dev);
131 int check_sfp_module = 0;
132 u16 pcifn = adapter->ahw.pci_func;
133
134 /* read which mode */
135 if (adapter->ahw.port_type == QLCNIC_GBE) {
136 ecmd->supported = (SUPPORTED_10baseT_Half |
137 SUPPORTED_10baseT_Full |
138 SUPPORTED_100baseT_Half |
139 SUPPORTED_100baseT_Full |
140 SUPPORTED_1000baseT_Half |
141 SUPPORTED_1000baseT_Full);
142
143 ecmd->advertising = (ADVERTISED_100baseT_Half |
144 ADVERTISED_100baseT_Full |
145 ADVERTISED_1000baseT_Half |
146 ADVERTISED_1000baseT_Full);
147
148 ecmd->speed = adapter->link_speed;
149 ecmd->duplex = adapter->link_duplex;
150 ecmd->autoneg = adapter->link_autoneg;
151
152 } else if (adapter->ahw.port_type == QLCNIC_XGBE) {
153 u32 val;
154
155 val = QLCRD32(adapter, QLCNIC_PORT_MODE_ADDR);
156 if (val == QLCNIC_PORT_MODE_802_3_AP) {
157 ecmd->supported = SUPPORTED_1000baseT_Full;
158 ecmd->advertising = ADVERTISED_1000baseT_Full;
159 } else {
160 ecmd->supported = SUPPORTED_10000baseT_Full;
161 ecmd->advertising = ADVERTISED_10000baseT_Full;
162 }
163
164 if (netif_running(dev) && adapter->has_link_events) {
165 ecmd->speed = adapter->link_speed;
166 ecmd->autoneg = adapter->link_autoneg;
167 ecmd->duplex = adapter->link_duplex;
168 goto skip;
169 }
170
171 val = QLCRD32(adapter, P3_LINK_SPEED_REG(pcifn));
172 ecmd->speed = P3_LINK_SPEED_MHZ *
173 P3_LINK_SPEED_VAL(pcifn, val);
174 ecmd->duplex = DUPLEX_FULL;
175 ecmd->autoneg = AUTONEG_DISABLE;
176 } else
177 return -EIO;
178
179skip:
180 ecmd->phy_address = adapter->physical_port;
181 ecmd->transceiver = XCVR_EXTERNAL;
182
183 switch (adapter->ahw.board_type) {
184 case QLCNIC_BRDTYPE_P3_REF_QG:
185 case QLCNIC_BRDTYPE_P3_4_GB:
186 case QLCNIC_BRDTYPE_P3_4_GB_MM:
187
188 ecmd->supported |= SUPPORTED_Autoneg;
189 ecmd->advertising |= ADVERTISED_Autoneg;
190 case QLCNIC_BRDTYPE_P3_10G_CX4:
191 case QLCNIC_BRDTYPE_P3_10G_CX4_LP:
192 case QLCNIC_BRDTYPE_P3_10000_BASE_T:
193 ecmd->supported |= SUPPORTED_TP;
194 ecmd->advertising |= ADVERTISED_TP;
195 ecmd->port = PORT_TP;
196 ecmd->autoneg = adapter->link_autoneg;
197 break;
198 case QLCNIC_BRDTYPE_P3_IMEZ:
199 case QLCNIC_BRDTYPE_P3_XG_LOM:
200 case QLCNIC_BRDTYPE_P3_HMEZ:
201 ecmd->supported |= SUPPORTED_MII;
202 ecmd->advertising |= ADVERTISED_MII;
203 ecmd->port = PORT_MII;
204 ecmd->autoneg = AUTONEG_DISABLE;
205 break;
206 case QLCNIC_BRDTYPE_P3_10G_SFP_PLUS:
207 case QLCNIC_BRDTYPE_P3_10G_SFP_CT:
208 case QLCNIC_BRDTYPE_P3_10G_SFP_QT:
209 ecmd->advertising |= ADVERTISED_TP;
210 ecmd->supported |= SUPPORTED_TP;
211 check_sfp_module = netif_running(dev) &&
212 adapter->has_link_events;
213 case QLCNIC_BRDTYPE_P3_10G_XFP:
214 ecmd->supported |= SUPPORTED_FIBRE;
215 ecmd->advertising |= ADVERTISED_FIBRE;
216 ecmd->port = PORT_FIBRE;
217 ecmd->autoneg = AUTONEG_DISABLE;
218 break;
219 case QLCNIC_BRDTYPE_P3_10G_TP:
220 if (adapter->ahw.port_type == QLCNIC_XGBE) {
221 ecmd->autoneg = AUTONEG_DISABLE;
222 ecmd->supported |= (SUPPORTED_FIBRE | SUPPORTED_TP);
223 ecmd->advertising |=
224 (ADVERTISED_FIBRE | ADVERTISED_TP);
225 ecmd->port = PORT_FIBRE;
226 check_sfp_module = netif_running(dev) &&
227 adapter->has_link_events;
228 } else {
229 ecmd->autoneg = AUTONEG_ENABLE;
230 ecmd->supported |= (SUPPORTED_TP | SUPPORTED_Autoneg);
231 ecmd->advertising |=
232 (ADVERTISED_TP | ADVERTISED_Autoneg);
233 ecmd->port = PORT_TP;
234 }
235 break;
236 default:
237 dev_err(&adapter->pdev->dev, "Unsupported board model %d\n",
238 adapter->ahw.board_type);
239 return -EIO;
240 }
241
242 if (check_sfp_module) {
243 switch (adapter->module_type) {
244 case LINKEVENT_MODULE_OPTICAL_UNKNOWN:
245 case LINKEVENT_MODULE_OPTICAL_SRLR:
246 case LINKEVENT_MODULE_OPTICAL_LRM:
247 case LINKEVENT_MODULE_OPTICAL_SFP_1G:
248 ecmd->port = PORT_FIBRE;
249 break;
250 case LINKEVENT_MODULE_TWINAX_UNSUPPORTED_CABLE:
251 case LINKEVENT_MODULE_TWINAX_UNSUPPORTED_CABLELEN:
252 case LINKEVENT_MODULE_TWINAX:
253 ecmd->port = PORT_TP;
254 break;
255 default:
256 ecmd->port = PORT_OTHER;
257 }
258 }
259
260 return 0;
261}
262
263static int
264qlcnic_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
265{
266 struct qlcnic_adapter *adapter = netdev_priv(dev);
267 __u32 status;
268
269 /* read which mode */
270 if (adapter->ahw.port_type == QLCNIC_GBE) {
271 /* autonegotiation */
272 if (qlcnic_fw_cmd_set_phy(adapter,
273 QLCNIC_NIU_GB_MII_MGMT_ADDR_AUTONEG,
274 ecmd->autoneg) != 0)
275 return -EIO;
276 else
277 adapter->link_autoneg = ecmd->autoneg;
278
279 if (qlcnic_fw_cmd_query_phy(adapter,
280 QLCNIC_NIU_GB_MII_MGMT_ADDR_PHY_STATUS,
281 &status) != 0)
282 return -EIO;
283
284 switch (ecmd->speed) {
285 case SPEED_10:
286 qlcnic_set_phy_speed(status, 0);
287 break;
288 case SPEED_100:
289 qlcnic_set_phy_speed(status, 1);
290 break;
291 case SPEED_1000:
292 qlcnic_set_phy_speed(status, 2);
293 break;
294 }
295
296 if (ecmd->duplex == DUPLEX_HALF)
297 qlcnic_clear_phy_duplex(status);
298 if (ecmd->duplex == DUPLEX_FULL)
299 qlcnic_set_phy_duplex(status);
300 if (qlcnic_fw_cmd_set_phy(adapter,
301 QLCNIC_NIU_GB_MII_MGMT_ADDR_PHY_STATUS,
302 *((int *)&status)) != 0)
303 return -EIO;
304 else {
305 adapter->link_speed = ecmd->speed;
306 adapter->link_duplex = ecmd->duplex;
307 }
308 } else
309 return -EOPNOTSUPP;
310
311 if (!netif_running(dev))
312 return 0;
313
314 dev->netdev_ops->ndo_stop(dev);
315 return dev->netdev_ops->ndo_open(dev);
316}
317
318static void
319qlcnic_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *p)
320{
321 struct qlcnic_adapter *adapter = netdev_priv(dev);
322 struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx;
323 struct qlcnic_host_sds_ring *sds_ring;
324 u32 *regs_buff = p;
325 int ring, i = 0;
326
327 memset(p, 0, qlcnic_get_regs_len(dev));
328 regs->version = (1 << 24) | (adapter->ahw.revision_id << 16) |
329 (adapter->pdev)->device;
330
331 for (i = 0; diag_registers[i] != -1; i++)
332 regs_buff[i] = QLCRD32(adapter, diag_registers[i]);
333
334 if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
335 return;
336
337 regs_buff[i++] = 0xFFEFCDAB; /* Marker between regs and ring count */
338
339 regs_buff[i++] = 1; /* No. of tx ring */
340 regs_buff[i++] = le32_to_cpu(*(adapter->tx_ring->hw_consumer));
341 regs_buff[i++] = readl(adapter->tx_ring->crb_cmd_producer);
342
343 regs_buff[i++] = 2; /* No. of rx ring */
344 regs_buff[i++] = readl(recv_ctx->rds_rings[0].crb_rcv_producer);
345 regs_buff[i++] = readl(recv_ctx->rds_rings[1].crb_rcv_producer);
346
347 regs_buff[i++] = adapter->max_sds_rings;
348
349 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
350 sds_ring = &(recv_ctx->sds_rings[ring]);
351 regs_buff[i++] = readl(sds_ring->crb_sts_consumer);
352 }
353}
354
355static u32 qlcnic_test_link(struct net_device *dev)
356{
357 struct qlcnic_adapter *adapter = netdev_priv(dev);
358 u32 val;
359
360 val = QLCRD32(adapter, CRB_XG_STATE_P3);
361 val = XG_LINK_STATE_P3(adapter->ahw.pci_func, val);
362 return (val == XG_LINK_UP_P3) ? 0 : 1;
363}
364
365static int
366qlcnic_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
367 u8 *bytes)
368{
369 struct qlcnic_adapter *adapter = netdev_priv(dev);
370 int offset;
371 int ret;
372
373 if (eeprom->len == 0)
374 return -EINVAL;
375
376 eeprom->magic = (adapter->pdev)->vendor |
377 ((adapter->pdev)->device << 16);
378 offset = eeprom->offset;
379
380 ret = qlcnic_rom_fast_read_words(adapter, offset, bytes,
381 eeprom->len);
382 if (ret < 0)
383 return ret;
384
385 return 0;
386}
387
388static void
389qlcnic_get_ringparam(struct net_device *dev,
390 struct ethtool_ringparam *ring)
391{
392 struct qlcnic_adapter *adapter = netdev_priv(dev);
393
394 ring->rx_pending = adapter->num_rxd;
395 ring->rx_jumbo_pending = adapter->num_jumbo_rxd;
396 ring->rx_jumbo_pending += adapter->num_lro_rxd;
397 ring->tx_pending = adapter->num_txd;
398
399 if (adapter->ahw.port_type == QLCNIC_GBE) {
400 ring->rx_max_pending = MAX_RCV_DESCRIPTORS_1G;
401 ring->rx_jumbo_max_pending = MAX_JUMBO_RCV_DESCRIPTORS_1G;
402 } else {
403 ring->rx_max_pending = MAX_RCV_DESCRIPTORS_10G;
404 ring->rx_jumbo_max_pending = MAX_JUMBO_RCV_DESCRIPTORS_10G;
405 }
406
407 ring->tx_max_pending = MAX_CMD_DESCRIPTORS;
408
409 ring->rx_mini_max_pending = 0;
410 ring->rx_mini_pending = 0;
411}
412
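/*
 * Clamp a requested ring size to [min, max] and then round it up to the next
 * power of two, logging whenever the value actually used differs from the
 * request (e.g. a request of 900 rx descriptors becomes 1024).
 */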
413static u32
414qlcnic_validate_ringparam(u32 val, u32 min, u32 max, char *r_name)
415{
416 u32 num_desc;
417 num_desc = max(val, min);
418 num_desc = min(num_desc, max);
419 num_desc = roundup_pow_of_two(num_desc);
420
421 if (val != num_desc) {
422 printk(KERN_INFO "%s: setting %s ring size %d instead of %d\n",
423 qlcnic_driver_name, r_name, num_desc, val);
424 }
425
426 return num_desc;
427}
428
429static int
430qlcnic_set_ringparam(struct net_device *dev,
431 struct ethtool_ringparam *ring)
432{
433 struct qlcnic_adapter *adapter = netdev_priv(dev);
434 u16 max_rcv_desc = MAX_RCV_DESCRIPTORS_10G;
435 u16 max_jumbo_desc = MAX_JUMBO_RCV_DESCRIPTORS_10G;
436 u16 num_rxd, num_jumbo_rxd, num_txd;
437
438
439 if (ring->rx_mini_pending)
440 return -EOPNOTSUPP;
441
442 if (adapter->ahw.port_type == QLCNIC_GBE) {
443 max_rcv_desc = MAX_RCV_DESCRIPTORS_1G;
444 max_jumbo_desc = MAX_JUMBO_RCV_DESCRIPTORS_10G;
445 }
446
447 num_rxd = qlcnic_validate_ringparam(ring->rx_pending,
448 MIN_RCV_DESCRIPTORS, max_rcv_desc, "rx");
449
450 num_jumbo_rxd = qlcnic_validate_ringparam(ring->rx_jumbo_pending,
451 MIN_JUMBO_DESCRIPTORS, max_jumbo_desc, "rx jumbo");
452
453 num_txd = qlcnic_validate_ringparam(ring->tx_pending,
454 MIN_CMD_DESCRIPTORS, MAX_CMD_DESCRIPTORS, "tx");
455
456 if (num_rxd == adapter->num_rxd && num_txd == adapter->num_txd &&
457 num_jumbo_rxd == adapter->num_jumbo_rxd)
458 return 0;
459
460 adapter->num_rxd = num_rxd;
461 adapter->num_jumbo_rxd = num_jumbo_rxd;
462 adapter->num_txd = num_txd;
463
464 return qlcnic_reset_context(adapter);
465}
466
467static void
468qlcnic_get_pauseparam(struct net_device *netdev,
469 struct ethtool_pauseparam *pause)
470{
471 struct qlcnic_adapter *adapter = netdev_priv(netdev);
472 int port = adapter->physical_port;
473 __u32 val;
474
475 if (adapter->ahw.port_type == QLCNIC_GBE) {
476 if ((port < 0) || (port > QLCNIC_NIU_MAX_GBE_PORTS))
477 return;
478 /* get flow control settings */
479 val = QLCRD32(adapter, QLCNIC_NIU_GB_MAC_CONFIG_0(port));
480 pause->rx_pause = qlcnic_gb_get_rx_flowctl(val);
481 val = QLCRD32(adapter, QLCNIC_NIU_GB_PAUSE_CTL);
482 switch (port) {
483 case 0:
484 pause->tx_pause = !(qlcnic_gb_get_gb0_mask(val));
485 break;
486 case 1:
487 pause->tx_pause = !(qlcnic_gb_get_gb1_mask(val));
488 break;
489 case 2:
490 pause->tx_pause = !(qlcnic_gb_get_gb2_mask(val));
491 break;
492 case 3:
493 default:
494 pause->tx_pause = !(qlcnic_gb_get_gb3_mask(val));
495 break;
496 }
497 } else if (adapter->ahw.port_type == QLCNIC_XGBE) {
498 if ((port < 0) || (port > QLCNIC_NIU_MAX_XG_PORTS))
499 return;
500 pause->rx_pause = 1;
501 val = QLCRD32(adapter, QLCNIC_NIU_XG_PAUSE_CTL);
502 if (port == 0)
503 pause->tx_pause = !(qlcnic_xg_get_xg0_mask(val));
504 else
505 pause->tx_pause = !(qlcnic_xg_get_xg1_mask(val));
506 } else {
507 dev_err(&netdev->dev, "Unknown board type: %x\n",
508 adapter->ahw.port_type);
509 }
510}
511
512static int
513qlcnic_set_pauseparam(struct net_device *netdev,
514 struct ethtool_pauseparam *pause)
515{
516 struct qlcnic_adapter *adapter = netdev_priv(netdev);
517 int port = adapter->physical_port;
518 __u32 val;
519
520 /* read mode */
521 if (adapter->ahw.port_type == QLCNIC_GBE) {
522 if ((port < 0) || (port > QLCNIC_NIU_MAX_GBE_PORTS))
523 return -EIO;
524 /* set flow control */
525 val = QLCRD32(adapter, QLCNIC_NIU_GB_MAC_CONFIG_0(port));
526
527 if (pause->rx_pause)
528 qlcnic_gb_rx_flowctl(val);
529 else
530 qlcnic_gb_unset_rx_flowctl(val);
531
532 QLCWR32(adapter, QLCNIC_NIU_GB_MAC_CONFIG_0(port),
533 val);
534 /* set tx pause */
535 val = QLCRD32(adapter, QLCNIC_NIU_GB_PAUSE_CTL);
536 switch (port) {
537 case 0:
538 if (pause->tx_pause)
539 qlcnic_gb_unset_gb0_mask(val);
540 else
541 qlcnic_gb_set_gb0_mask(val);
542 break;
543 case 1:
544 if (pause->tx_pause)
545 qlcnic_gb_unset_gb1_mask(val);
546 else
547 qlcnic_gb_set_gb1_mask(val);
548 break;
549 case 2:
550 if (pause->tx_pause)
551 qlcnic_gb_unset_gb2_mask(val);
552 else
553 qlcnic_gb_set_gb2_mask(val);
554 break;
555 case 3:
556 default:
557 if (pause->tx_pause)
558 qlcnic_gb_unset_gb3_mask(val);
559 else
560 qlcnic_gb_set_gb3_mask(val);
561 break;
562 }
563 QLCWR32(adapter, QLCNIC_NIU_GB_PAUSE_CTL, val);
564 } else if (adapter->ahw.port_type == QLCNIC_XGBE) {
565 if ((port < 0) || (port > QLCNIC_NIU_MAX_XG_PORTS))
566 return -EIO;
567 val = QLCRD32(adapter, QLCNIC_NIU_XG_PAUSE_CTL);
568 if (port == 0) {
569 if (pause->tx_pause)
570 qlcnic_xg_unset_xg0_mask(val);
571 else
572 qlcnic_xg_set_xg0_mask(val);
573 } else {
574 if (pause->tx_pause)
575 qlcnic_xg_unset_xg1_mask(val);
576 else
577 qlcnic_xg_set_xg1_mask(val);
578 }
579 QLCWR32(adapter, QLCNIC_NIU_XG_PAUSE_CTL, val);
580 } else {
581 dev_err(&netdev->dev, "Unknown board type: %x\n",
582 adapter->ahw.port_type);
583 }
584 return 0;
585}
586
587static int qlcnic_reg_test(struct net_device *dev)
588{
589 struct qlcnic_adapter *adapter = netdev_priv(dev);
590 u32 data_read, data_written;
591
592 data_read = QLCRD32(adapter, QLCNIC_PCIX_PH_REG(0));
593 if ((data_read & 0xffff) != adapter->pdev->vendor)
594 return 1;
595
596 data_written = (u32)0xa5a5a5a5;
597
598 QLCWR32(adapter, CRB_SCRATCHPAD_TEST, data_written);
599 data_read = QLCRD32(adapter, CRB_SCRATCHPAD_TEST);
600 if (data_written != data_read)
601 return 1;
602
603 return 0;
604}
605
606static int qlcnic_get_sset_count(struct net_device *dev, int sset)
607{
608 switch (sset) {
609 case ETH_SS_TEST:
610 return QLCNIC_TEST_LEN;
611 case ETH_SS_STATS:
612 return QLCNIC_STATS_LEN;
613 default:
614 return -EOPNOTSUPP;
615 }
616}
617
618#define QLC_ILB_PKT_SIZE 64
619
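/*
 * Build the QLC_ILB_PKT_SIZE-byte internal-loopback test frame: bytes 0-11
 * are 0xff (the Ethernet address fields), bytes 12-15 carry a fixed marker,
 * and the remainder is 0x4e filler.  qlcnic_check_loopback_buff() regenerates
 * the same pattern and memcmp()s it against the received data.
 */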
620static void qlcnic_create_loopback_buff(unsigned char *data)
621{
622 unsigned char random_data[] = {0xa8, 0x06, 0x45, 0x00};
623 memset(data, 0x4e, QLC_ILB_PKT_SIZE);
624 memset(data, 0xff, 12);
625 memcpy(data + 12, random_data, sizeof(random_data));
626}
627
628int qlcnic_check_loopback_buff(unsigned char *data)
629{
630 unsigned char buff[QLC_ILB_PKT_SIZE];
631 qlcnic_create_loopback_buff(buff);
632 return memcmp(data, buff, QLC_ILB_PKT_SIZE);
633}
634
635static int qlcnic_do_ilb_test(struct qlcnic_adapter *adapter)
636{
637 struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx;
638 struct qlcnic_host_sds_ring *sds_ring = &recv_ctx->sds_rings[0];
639 struct sk_buff *skb;
640 int i;
641
642 for (i = 0; i < 16; i++) {
643 skb = dev_alloc_skb(QLC_ILB_PKT_SIZE);
644 qlcnic_create_loopback_buff(skb->data);
645 skb_put(skb, QLC_ILB_PKT_SIZE);
646
647 adapter->diag_cnt = 0;
648
649 qlcnic_xmit_frame(skb, adapter->netdev);
650
651 msleep(5);
652
653 qlcnic_process_rcv_ring_diag(sds_ring);
654
655 dev_kfree_skb_any(skb);
656 if (!adapter->diag_cnt)
657 return -1;
658 }
659 return 0;
660}
661
662static int qlcnic_loopback_test(struct net_device *netdev)
663{
664 struct qlcnic_adapter *adapter = netdev_priv(netdev);
665 int max_sds_rings = adapter->max_sds_rings;
666 int ret;
667
668 if (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state))
669 return -EIO;
670
671 ret = qlcnic_diag_alloc_res(netdev, QLCNIC_LOOPBACK_TEST);
672 if (ret)
673 goto clear_it;
674
675 ret = qlcnic_set_ilb_mode(adapter);
676 if (ret)
677 goto done;
678
679 ret = qlcnic_do_ilb_test(adapter);
680
681 qlcnic_clear_ilb_mode(adapter);
682
683done:
684 qlcnic_diag_free_res(netdev, max_sds_rings);
685
686clear_it:
687 adapter->max_sds_rings = max_sds_rings;
688 clear_bit(__QLCNIC_RESETTING, &adapter->state);
689 return ret;
690}
691
692static int qlcnic_irq_test(struct net_device *netdev)
693{
694 struct qlcnic_adapter *adapter = netdev_priv(netdev);
695 int max_sds_rings = adapter->max_sds_rings;
696 int ret;
697
698 if (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state))
699 return -EIO;
700
701 ret = qlcnic_diag_alloc_res(netdev, QLCNIC_INTERRUPT_TEST);
702 if (ret)
703 goto clear_it;
704
705 adapter->diag_cnt = 0;
706 ret = qlcnic_issue_cmd(adapter, adapter->ahw.pci_func,
707 QLCHAL_VERSION, adapter->portnum, 0, 0, 0x00000011);
708 if (ret)
709 goto done;
710
711 msleep(10);
712
713 ret = !adapter->diag_cnt;
714
715done:
716 qlcnic_diag_free_res(netdev, max_sds_rings);
717
718clear_it:
719 adapter->max_sds_rings = max_sds_rings;
720 clear_bit(__QLCNIC_RESETTING, &adapter->state);
721 return ret;
722}
723
724static void
725qlcnic_diag_test(struct net_device *dev, struct ethtool_test *eth_test,
726 u64 *data)
727{
728 memset(data, 0, sizeof(u64) * QLCNIC_TEST_LEN);
729
730 if (eth_test->flags == ETH_TEST_FL_OFFLINE) {
731 data[2] = qlcnic_irq_test(dev);
732 if (data[2])
733 eth_test->flags |= ETH_TEST_FL_FAILED;
734
735 data[3] = qlcnic_loopback_test(dev);
736 if (data[3])
737 eth_test->flags |= ETH_TEST_FL_FAILED;
738
739 }
740
741 data[0] = qlcnic_reg_test(dev);
742 if (data[0])
743 eth_test->flags |= ETH_TEST_FL_FAILED;
744
745 /* link test */
746 data[1] = (u64) qlcnic_test_link(dev);
747 if (data[1])
748 eth_test->flags |= ETH_TEST_FL_FAILED;
749}
750
751static void
752qlcnic_get_strings(struct net_device *dev, u32 stringset, u8 * data)
753{
754 int index;
755
756 switch (stringset) {
757 case ETH_SS_TEST:
758 memcpy(data, *qlcnic_gstrings_test,
759 QLCNIC_TEST_LEN * ETH_GSTRING_LEN);
760 break;
761 case ETH_SS_STATS:
762 for (index = 0; index < QLCNIC_STATS_LEN; index++) {
763 memcpy(data + index * ETH_GSTRING_LEN,
764 qlcnic_gstrings_stats[index].stat_string,
765 ETH_GSTRING_LEN);
766 }
767 break;
768 }
769}
770
771static void
772qlcnic_get_ethtool_stats(struct net_device *dev,
773 struct ethtool_stats *stats, u64 * data)
774{
775 struct qlcnic_adapter *adapter = netdev_priv(dev);
776 int index;
777
778 for (index = 0; index < QLCNIC_STATS_LEN; index++) {
779 char *p =
780 (char *)adapter +
781 qlcnic_gstrings_stats[index].stat_offset;
782 data[index] =
783 (qlcnic_gstrings_stats[index].sizeof_stat ==
784	    sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
785 }
786}
787
788static u32 qlcnic_get_rx_csum(struct net_device *dev)
789{
790 struct qlcnic_adapter *adapter = netdev_priv(dev);
791 return adapter->rx_csum;
792}
793
794static int qlcnic_set_rx_csum(struct net_device *dev, u32 data)
795{
796 struct qlcnic_adapter *adapter = netdev_priv(dev);
797 adapter->rx_csum = !!data;
798 return 0;
799}
800
801static u32 qlcnic_get_tso(struct net_device *dev)
802{
803 return (dev->features & (NETIF_F_TSO | NETIF_F_TSO6)) != 0;
804}
805
806static int qlcnic_set_tso(struct net_device *dev, u32 data)
807{
808 if (data)
809 dev->features |= (NETIF_F_TSO | NETIF_F_TSO6);
810 else
811 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6);
812
813 return 0;
814}
815
816static int qlcnic_blink_led(struct net_device *dev, u32 val)
817{
818 struct qlcnic_adapter *adapter = netdev_priv(dev);
819 int ret;
820
821 ret = qlcnic_config_led(adapter, 1, 0xf);
822 if (ret) {
823 dev_err(&adapter->pdev->dev,
824 "Failed to set LED blink state.\n");
825 return ret;
826 }
827
828 msleep_interruptible(val * 1000);
829
830 ret = qlcnic_config_led(adapter, 0, 0xf);
831 if (ret) {
832 dev_err(&adapter->pdev->dev,
833 "Failed to reset LED blink state.\n");
834 return ret;
835 }
836
837 return 0;
838}
839
840static void
841qlcnic_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
842{
843 struct qlcnic_adapter *adapter = netdev_priv(dev);
844 u32 wol_cfg;
845
846 wol->supported = 0;
847 wol->wolopts = 0;
848
849 wol_cfg = QLCRD32(adapter, QLCNIC_WOL_CONFIG_NV);
850 if (wol_cfg & (1UL << adapter->portnum))
851 wol->supported |= WAKE_MAGIC;
852
853 wol_cfg = QLCRD32(adapter, QLCNIC_WOL_CONFIG);
854 if (wol_cfg & (1UL << adapter->portnum))
855 wol->wolopts |= WAKE_MAGIC;
856}
857
858static int
859qlcnic_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
860{
861 struct qlcnic_adapter *adapter = netdev_priv(dev);
862 u32 wol_cfg;
863
864 if (wol->wolopts & ~WAKE_MAGIC)
865 return -EOPNOTSUPP;
866
867 wol_cfg = QLCRD32(adapter, QLCNIC_WOL_CONFIG_NV);
868 if (!(wol_cfg & (1 << adapter->portnum)))
869 return -EOPNOTSUPP;
870
871 wol_cfg = QLCRD32(adapter, QLCNIC_WOL_CONFIG);
872 if (wol->wolopts & WAKE_MAGIC)
873 wol_cfg |= 1UL << adapter->portnum;
874 else
875 wol_cfg &= ~(1UL << adapter->portnum);
876
877 QLCWR32(adapter, QLCNIC_WOL_CONFIG, wol_cfg);
878
879 return 0;
880}
881
882/*
883 * Set the interrupt coalescing parameters. Currently only the normal
884 * (non-adaptive) mode is supported. If rx_coalesce_usecs == 0 or
885 * rx_max_coalesced_frames == 0, the firmware coalescing is reset to its defaults.
886 */
887static int qlcnic_set_intr_coalesce(struct net_device *netdev,
888 struct ethtool_coalesce *ethcoal)
889{
890 struct qlcnic_adapter *adapter = netdev_priv(netdev);
891
892 if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
893 return -EINVAL;
894
895 /*
896 * Return Error if unsupported values or
897 * unsupported parameters are set.
898 */
899 if (ethcoal->rx_coalesce_usecs > 0xffff ||
900 ethcoal->rx_max_coalesced_frames > 0xffff ||
901 ethcoal->tx_coalesce_usecs > 0xffff ||
902 ethcoal->tx_max_coalesced_frames > 0xffff ||
903 ethcoal->rx_coalesce_usecs_irq ||
904 ethcoal->rx_max_coalesced_frames_irq ||
905 ethcoal->tx_coalesce_usecs_irq ||
906 ethcoal->tx_max_coalesced_frames_irq ||
907 ethcoal->stats_block_coalesce_usecs ||
908 ethcoal->use_adaptive_rx_coalesce ||
909 ethcoal->use_adaptive_tx_coalesce ||
910 ethcoal->pkt_rate_low ||
911 ethcoal->rx_coalesce_usecs_low ||
912 ethcoal->rx_max_coalesced_frames_low ||
913 ethcoal->tx_coalesce_usecs_low ||
914 ethcoal->tx_max_coalesced_frames_low ||
915 ethcoal->pkt_rate_high ||
916 ethcoal->rx_coalesce_usecs_high ||
917 ethcoal->rx_max_coalesced_frames_high ||
918 ethcoal->tx_coalesce_usecs_high ||
919 ethcoal->tx_max_coalesced_frames_high)
920 return -EINVAL;
921
922 if (!ethcoal->rx_coalesce_usecs ||
923 !ethcoal->rx_max_coalesced_frames) {
924 adapter->coal.flags = QLCNIC_INTR_DEFAULT;
925 adapter->coal.normal.data.rx_time_us =
926 QLCNIC_DEFAULT_INTR_COALESCE_RX_TIME_US;
927 adapter->coal.normal.data.rx_packets =
928 QLCNIC_DEFAULT_INTR_COALESCE_RX_PACKETS;
929 } else {
930 adapter->coal.flags = 0;
931 adapter->coal.normal.data.rx_time_us =
932 ethcoal->rx_coalesce_usecs;
933 adapter->coal.normal.data.rx_packets =
934 ethcoal->rx_max_coalesced_frames;
935 }
936 adapter->coal.normal.data.tx_time_us = ethcoal->tx_coalesce_usecs;
937 adapter->coal.normal.data.tx_packets =
938 ethcoal->tx_max_coalesced_frames;
939
940 qlcnic_config_intr_coalesce(adapter);
941
942 return 0;
943}
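/*
 * Usage sketch (interface name and values are only examples): zeroing
 * rx-usecs or rx-frames falls back to the firmware defaults, any other
 * in-range value is programmed directly, and any field rejected by the
 * checks above returns -EINVAL.
 *
 *   ethtool -C eth0 rx-usecs 0                 # restore firmware defaults
 *   ethtool -C eth0 rx-usecs 3 rx-frames 64    # explicit coalescing
 */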
944
945static int qlcnic_get_intr_coalesce(struct net_device *netdev,
946 struct ethtool_coalesce *ethcoal)
947{
948 struct qlcnic_adapter *adapter = netdev_priv(netdev);
949
950 if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
951 return -EINVAL;
952
953 ethcoal->rx_coalesce_usecs = adapter->coal.normal.data.rx_time_us;
954 ethcoal->tx_coalesce_usecs = adapter->coal.normal.data.tx_time_us;
955 ethcoal->rx_max_coalesced_frames =
956 adapter->coal.normal.data.rx_packets;
957 ethcoal->tx_max_coalesced_frames =
958 adapter->coal.normal.data.tx_packets;
959
960 return 0;
961}
962
963static int qlcnic_set_flags(struct net_device *netdev, u32 data)
964{
965 struct qlcnic_adapter *adapter = netdev_priv(netdev);
966 int hw_lro;
967
968 if (!(adapter->capabilities & QLCNIC_FW_CAPABILITY_HW_LRO))
969 return -EINVAL;
970
971 ethtool_op_set_flags(netdev, data);
972
973 hw_lro = (data & ETH_FLAG_LRO) ? QLCNIC_LRO_ENABLED : 0;
974
975 if (qlcnic_config_hw_lro(adapter, hw_lro))
976 return -EIO;
977
978 if ((hw_lro == 0) && qlcnic_send_lro_cleanup(adapter))
979 return -EIO;
980
981
982 return 0;
983}
984
985const struct ethtool_ops qlcnic_ethtool_ops = {
986 .get_settings = qlcnic_get_settings,
987 .set_settings = qlcnic_set_settings,
988 .get_drvinfo = qlcnic_get_drvinfo,
989 .get_regs_len = qlcnic_get_regs_len,
990 .get_regs = qlcnic_get_regs,
991 .get_link = ethtool_op_get_link,
992 .get_eeprom_len = qlcnic_get_eeprom_len,
993 .get_eeprom = qlcnic_get_eeprom,
994 .get_ringparam = qlcnic_get_ringparam,
995 .set_ringparam = qlcnic_set_ringparam,
996 .get_pauseparam = qlcnic_get_pauseparam,
997 .set_pauseparam = qlcnic_set_pauseparam,
998 .set_tx_csum = ethtool_op_set_tx_csum,
999 .set_sg = ethtool_op_set_sg,
1000 .get_tso = qlcnic_get_tso,
1001 .set_tso = qlcnic_set_tso,
1002 .get_wol = qlcnic_get_wol,
1003 .set_wol = qlcnic_set_wol,
1004 .self_test = qlcnic_diag_test,
1005 .get_strings = qlcnic_get_strings,
1006 .get_ethtool_stats = qlcnic_get_ethtool_stats,
1007 .get_sset_count = qlcnic_get_sset_count,
1008 .get_rx_csum = qlcnic_get_rx_csum,
1009 .set_rx_csum = qlcnic_set_rx_csum,
1010 .get_coalesce = qlcnic_get_intr_coalesce,
1011 .set_coalesce = qlcnic_set_intr_coalesce,
1012 .get_flags = ethtool_op_get_flags,
1013 .set_flags = qlcnic_set_flags,
1014 .phys_id = qlcnic_blink_led,
1015};
diff --git a/drivers/net/qlcnic/qlcnic_hdr.h b/drivers/net/qlcnic/qlcnic_hdr.h
new file mode 100644
index 00000000000..0469f84360a
--- /dev/null
+++ b/drivers/net/qlcnic/qlcnic_hdr.h
@@ -0,0 +1,937 @@
1/*
2 * Copyright (C) 2009 - QLogic Corporation.
3 * All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License
7 * as published by the Free Software Foundation; either version 2
8 * of the License, or (at your option) any later version.
9 *
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 59 Temple Place - Suite 330, Boston,
18 * MA 02111-1307, USA.
19 *
20 * The full GNU General Public License is included in this distribution
21 * in the file called "COPYING".
22 *
23 */
24
25#ifndef __QLCNIC_HDR_H_
26#define __QLCNIC_HDR_H_
27
28#include <linux/kernel.h>
29#include <linux/types.h>
30
31/*
32 * The basic unit of access when reading/writing control registers.
33 */
34
35enum {
36 QLCNIC_HW_H0_CH_HUB_ADR = 0x05,
37 QLCNIC_HW_H1_CH_HUB_ADR = 0x0E,
38 QLCNIC_HW_H2_CH_HUB_ADR = 0x03,
39 QLCNIC_HW_H3_CH_HUB_ADR = 0x01,
40 QLCNIC_HW_H4_CH_HUB_ADR = 0x06,
41 QLCNIC_HW_H5_CH_HUB_ADR = 0x07,
42 QLCNIC_HW_H6_CH_HUB_ADR = 0x08
43};
44
45/* Hub 0 */
46enum {
47 QLCNIC_HW_MN_CRB_AGT_ADR = 0x15,
48 QLCNIC_HW_MS_CRB_AGT_ADR = 0x25
49};
50
51/* Hub 1 */
52enum {
53 QLCNIC_HW_PS_CRB_AGT_ADR = 0x73,
54 QLCNIC_HW_SS_CRB_AGT_ADR = 0x20,
55 QLCNIC_HW_RPMX3_CRB_AGT_ADR = 0x0b,
56 QLCNIC_HW_QMS_CRB_AGT_ADR = 0x00,
57 QLCNIC_HW_SQGS0_CRB_AGT_ADR = 0x01,
58 QLCNIC_HW_SQGS1_CRB_AGT_ADR = 0x02,
59 QLCNIC_HW_SQGS2_CRB_AGT_ADR = 0x03,
60 QLCNIC_HW_SQGS3_CRB_AGT_ADR = 0x04,
61 QLCNIC_HW_C2C0_CRB_AGT_ADR = 0x58,
62 QLCNIC_HW_C2C1_CRB_AGT_ADR = 0x59,
63 QLCNIC_HW_C2C2_CRB_AGT_ADR = 0x5a,
64 QLCNIC_HW_RPMX2_CRB_AGT_ADR = 0x0a,
65 QLCNIC_HW_RPMX4_CRB_AGT_ADR = 0x0c,
66 QLCNIC_HW_RPMX7_CRB_AGT_ADR = 0x0f,
67 QLCNIC_HW_RPMX9_CRB_AGT_ADR = 0x12,
68 QLCNIC_HW_SMB_CRB_AGT_ADR = 0x18
69};
70
71/* Hub 2 */
72enum {
73 QLCNIC_HW_NIU_CRB_AGT_ADR = 0x31,
74 QLCNIC_HW_I2C0_CRB_AGT_ADR = 0x19,
75 QLCNIC_HW_I2C1_CRB_AGT_ADR = 0x29,
76
77 QLCNIC_HW_SN_CRB_AGT_ADR = 0x10,
78 QLCNIC_HW_I2Q_CRB_AGT_ADR = 0x20,
79 QLCNIC_HW_LPC_CRB_AGT_ADR = 0x22,
80 QLCNIC_HW_ROMUSB_CRB_AGT_ADR = 0x21,
81 QLCNIC_HW_QM_CRB_AGT_ADR = 0x66,
82 QLCNIC_HW_SQG0_CRB_AGT_ADR = 0x60,
83 QLCNIC_HW_SQG1_CRB_AGT_ADR = 0x61,
84 QLCNIC_HW_SQG2_CRB_AGT_ADR = 0x62,
85 QLCNIC_HW_SQG3_CRB_AGT_ADR = 0x63,
86 QLCNIC_HW_RPMX1_CRB_AGT_ADR = 0x09,
87 QLCNIC_HW_RPMX5_CRB_AGT_ADR = 0x0d,
88 QLCNIC_HW_RPMX6_CRB_AGT_ADR = 0x0e,
89 QLCNIC_HW_RPMX8_CRB_AGT_ADR = 0x11
90};
91
92/* Hub 3 */
93enum {
94 QLCNIC_HW_PH_CRB_AGT_ADR = 0x1A,
95 QLCNIC_HW_SRE_CRB_AGT_ADR = 0x50,
96 QLCNIC_HW_EG_CRB_AGT_ADR = 0x51,
97 QLCNIC_HW_RPMX0_CRB_AGT_ADR = 0x08
98};
99
100/* Hub 4 */
101enum {
102 QLCNIC_HW_PEGN0_CRB_AGT_ADR = 0x40,
103 QLCNIC_HW_PEGN1_CRB_AGT_ADR,
104 QLCNIC_HW_PEGN2_CRB_AGT_ADR,
105 QLCNIC_HW_PEGN3_CRB_AGT_ADR,
106 QLCNIC_HW_PEGNI_CRB_AGT_ADR,
107 QLCNIC_HW_PEGND_CRB_AGT_ADR,
108 QLCNIC_HW_PEGNC_CRB_AGT_ADR,
109 QLCNIC_HW_PEGR0_CRB_AGT_ADR,
110 QLCNIC_HW_PEGR1_CRB_AGT_ADR,
111 QLCNIC_HW_PEGR2_CRB_AGT_ADR,
112 QLCNIC_HW_PEGR3_CRB_AGT_ADR,
113 QLCNIC_HW_PEGN4_CRB_AGT_ADR
114};
115
116/* Hub 5 */
117enum {
118 QLCNIC_HW_PEGS0_CRB_AGT_ADR = 0x40,
119 QLCNIC_HW_PEGS1_CRB_AGT_ADR,
120 QLCNIC_HW_PEGS2_CRB_AGT_ADR,
121 QLCNIC_HW_PEGS3_CRB_AGT_ADR,
122 QLCNIC_HW_PEGSI_CRB_AGT_ADR,
123 QLCNIC_HW_PEGSD_CRB_AGT_ADR,
124 QLCNIC_HW_PEGSC_CRB_AGT_ADR
125};
126
127/* Hub 6 */
128enum {
129 QLCNIC_HW_CAS0_CRB_AGT_ADR = 0x46,
130 QLCNIC_HW_CAS1_CRB_AGT_ADR = 0x47,
131 QLCNIC_HW_CAS2_CRB_AGT_ADR = 0x48,
132 QLCNIC_HW_CAS3_CRB_AGT_ADR = 0x49,
133 QLCNIC_HW_NCM_CRB_AGT_ADR = 0x16,
134 QLCNIC_HW_TMR_CRB_AGT_ADR = 0x17,
135 QLCNIC_HW_XDMA_CRB_AGT_ADR = 0x05,
136 QLCNIC_HW_OCM0_CRB_AGT_ADR = 0x06,
137 QLCNIC_HW_OCM1_CRB_AGT_ADR = 0x07
138};
139
140/* Floaters - non existent modules */
141#define QLCNIC_HW_EFC_RPMX0_CRB_AGT_ADR 0x67
142
143/* This field defines PCI/X adr [25:20] of agents on the CRB */
144enum {
145 QLCNIC_HW_PX_MAP_CRB_PH = 0,
146 QLCNIC_HW_PX_MAP_CRB_PS,
147 QLCNIC_HW_PX_MAP_CRB_MN,
148 QLCNIC_HW_PX_MAP_CRB_MS,
149 QLCNIC_HW_PX_MAP_CRB_PGR1,
150 QLCNIC_HW_PX_MAP_CRB_SRE,
151 QLCNIC_HW_PX_MAP_CRB_NIU,
152 QLCNIC_HW_PX_MAP_CRB_QMN,
153 QLCNIC_HW_PX_MAP_CRB_SQN0,
154 QLCNIC_HW_PX_MAP_CRB_SQN1,
155 QLCNIC_HW_PX_MAP_CRB_SQN2,
156 QLCNIC_HW_PX_MAP_CRB_SQN3,
157 QLCNIC_HW_PX_MAP_CRB_QMS,
158 QLCNIC_HW_PX_MAP_CRB_SQS0,
159 QLCNIC_HW_PX_MAP_CRB_SQS1,
160 QLCNIC_HW_PX_MAP_CRB_SQS2,
161 QLCNIC_HW_PX_MAP_CRB_SQS3,
162 QLCNIC_HW_PX_MAP_CRB_PGN0,
163 QLCNIC_HW_PX_MAP_CRB_PGN1,
164 QLCNIC_HW_PX_MAP_CRB_PGN2,
165 QLCNIC_HW_PX_MAP_CRB_PGN3,
166 QLCNIC_HW_PX_MAP_CRB_PGND,
167 QLCNIC_HW_PX_MAP_CRB_PGNI,
168 QLCNIC_HW_PX_MAP_CRB_PGS0,
169 QLCNIC_HW_PX_MAP_CRB_PGS1,
170 QLCNIC_HW_PX_MAP_CRB_PGS2,
171 QLCNIC_HW_PX_MAP_CRB_PGS3,
172 QLCNIC_HW_PX_MAP_CRB_PGSD,
173 QLCNIC_HW_PX_MAP_CRB_PGSI,
174 QLCNIC_HW_PX_MAP_CRB_SN,
175 QLCNIC_HW_PX_MAP_CRB_PGR2,
176 QLCNIC_HW_PX_MAP_CRB_EG,
177 QLCNIC_HW_PX_MAP_CRB_PH2,
178 QLCNIC_HW_PX_MAP_CRB_PS2,
179 QLCNIC_HW_PX_MAP_CRB_CAM,
180 QLCNIC_HW_PX_MAP_CRB_CAS0,
181 QLCNIC_HW_PX_MAP_CRB_CAS1,
182 QLCNIC_HW_PX_MAP_CRB_CAS2,
183 QLCNIC_HW_PX_MAP_CRB_C2C0,
184 QLCNIC_HW_PX_MAP_CRB_C2C1,
185 QLCNIC_HW_PX_MAP_CRB_TIMR,
186 QLCNIC_HW_PX_MAP_CRB_PGR3,
187 QLCNIC_HW_PX_MAP_CRB_RPMX1,
188 QLCNIC_HW_PX_MAP_CRB_RPMX2,
189 QLCNIC_HW_PX_MAP_CRB_RPMX3,
190 QLCNIC_HW_PX_MAP_CRB_RPMX4,
191 QLCNIC_HW_PX_MAP_CRB_RPMX5,
192 QLCNIC_HW_PX_MAP_CRB_RPMX6,
193 QLCNIC_HW_PX_MAP_CRB_RPMX7,
194 QLCNIC_HW_PX_MAP_CRB_XDMA,
195 QLCNIC_HW_PX_MAP_CRB_I2Q,
196 QLCNIC_HW_PX_MAP_CRB_ROMUSB,
197 QLCNIC_HW_PX_MAP_CRB_CAS3,
198 QLCNIC_HW_PX_MAP_CRB_RPMX0,
199 QLCNIC_HW_PX_MAP_CRB_RPMX8,
200 QLCNIC_HW_PX_MAP_CRB_RPMX9,
201 QLCNIC_HW_PX_MAP_CRB_OCM0,
202 QLCNIC_HW_PX_MAP_CRB_OCM1,
203 QLCNIC_HW_PX_MAP_CRB_SMB,
204 QLCNIC_HW_PX_MAP_CRB_I2C0,
205 QLCNIC_HW_PX_MAP_CRB_I2C1,
206 QLCNIC_HW_PX_MAP_CRB_LPC,
207 QLCNIC_HW_PX_MAP_CRB_PGNC,
208 QLCNIC_HW_PX_MAP_CRB_PGR0
209};
210
211/* This field defines CRB adr [31:20] of the agents */
212
213#define QLCNIC_HW_CRB_HUB_AGT_ADR_MN \
214 ((QLCNIC_HW_H0_CH_HUB_ADR << 7) | QLCNIC_HW_MN_CRB_AGT_ADR)
215#define QLCNIC_HW_CRB_HUB_AGT_ADR_PH \
216 ((QLCNIC_HW_H0_CH_HUB_ADR << 7) | QLCNIC_HW_PH_CRB_AGT_ADR)
217#define QLCNIC_HW_CRB_HUB_AGT_ADR_MS \
218 ((QLCNIC_HW_H0_CH_HUB_ADR << 7) | QLCNIC_HW_MS_CRB_AGT_ADR)
219
220#define QLCNIC_HW_CRB_HUB_AGT_ADR_PS \
221 ((QLCNIC_HW_H1_CH_HUB_ADR << 7) | QLCNIC_HW_PS_CRB_AGT_ADR)
222#define QLCNIC_HW_CRB_HUB_AGT_ADR_SS \
223 ((QLCNIC_HW_H1_CH_HUB_ADR << 7) | QLCNIC_HW_SS_CRB_AGT_ADR)
224#define QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX3 \
225 ((QLCNIC_HW_H1_CH_HUB_ADR << 7) | QLCNIC_HW_RPMX3_CRB_AGT_ADR)
226#define QLCNIC_HW_CRB_HUB_AGT_ADR_QMS \
227 ((QLCNIC_HW_H1_CH_HUB_ADR << 7) | QLCNIC_HW_QMS_CRB_AGT_ADR)
228#define QLCNIC_HW_CRB_HUB_AGT_ADR_SQS0 \
229 ((QLCNIC_HW_H1_CH_HUB_ADR << 7) | QLCNIC_HW_SQGS0_CRB_AGT_ADR)
230#define QLCNIC_HW_CRB_HUB_AGT_ADR_SQS1 \
231 ((QLCNIC_HW_H1_CH_HUB_ADR << 7) | QLCNIC_HW_SQGS1_CRB_AGT_ADR)
232#define QLCNIC_HW_CRB_HUB_AGT_ADR_SQS2 \
233 ((QLCNIC_HW_H1_CH_HUB_ADR << 7) | QLCNIC_HW_SQGS2_CRB_AGT_ADR)
234#define QLCNIC_HW_CRB_HUB_AGT_ADR_SQS3 \
235 ((QLCNIC_HW_H1_CH_HUB_ADR << 7) | QLCNIC_HW_SQGS3_CRB_AGT_ADR)
236#define QLCNIC_HW_CRB_HUB_AGT_ADR_C2C0 \
237 ((QLCNIC_HW_H1_CH_HUB_ADR << 7) | QLCNIC_HW_C2C0_CRB_AGT_ADR)
238#define QLCNIC_HW_CRB_HUB_AGT_ADR_C2C1 \
239 ((QLCNIC_HW_H1_CH_HUB_ADR << 7) | QLCNIC_HW_C2C1_CRB_AGT_ADR)
240#define QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX2 \
241 ((QLCNIC_HW_H1_CH_HUB_ADR << 7) | QLCNIC_HW_RPMX2_CRB_AGT_ADR)
242#define QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX4 \
243 ((QLCNIC_HW_H1_CH_HUB_ADR << 7) | QLCNIC_HW_RPMX4_CRB_AGT_ADR)
244#define QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX7 \
245 ((QLCNIC_HW_H1_CH_HUB_ADR << 7) | QLCNIC_HW_RPMX7_CRB_AGT_ADR)
246#define QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX9 \
247 ((QLCNIC_HW_H1_CH_HUB_ADR << 7) | QLCNIC_HW_RPMX9_CRB_AGT_ADR)
248#define QLCNIC_HW_CRB_HUB_AGT_ADR_SMB \
249 ((QLCNIC_HW_H1_CH_HUB_ADR << 7) | QLCNIC_HW_SMB_CRB_AGT_ADR)
250
251#define QLCNIC_HW_CRB_HUB_AGT_ADR_NIU \
252 ((QLCNIC_HW_H2_CH_HUB_ADR << 7) | QLCNIC_HW_NIU_CRB_AGT_ADR)
253#define QLCNIC_HW_CRB_HUB_AGT_ADR_I2C0 \
254 ((QLCNIC_HW_H2_CH_HUB_ADR << 7) | QLCNIC_HW_I2C0_CRB_AGT_ADR)
255#define QLCNIC_HW_CRB_HUB_AGT_ADR_I2C1 \
256 ((QLCNIC_HW_H2_CH_HUB_ADR << 7) | QLCNIC_HW_I2C1_CRB_AGT_ADR)
257
258#define QLCNIC_HW_CRB_HUB_AGT_ADR_SRE \
259 ((QLCNIC_HW_H3_CH_HUB_ADR << 7) | QLCNIC_HW_SRE_CRB_AGT_ADR)
260#define QLCNIC_HW_CRB_HUB_AGT_ADR_EG \
261 ((QLCNIC_HW_H3_CH_HUB_ADR << 7) | QLCNIC_HW_EG_CRB_AGT_ADR)
262#define QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX0 \
263 ((QLCNIC_HW_H3_CH_HUB_ADR << 7) | QLCNIC_HW_RPMX0_CRB_AGT_ADR)
264#define QLCNIC_HW_CRB_HUB_AGT_ADR_QMN \
265 ((QLCNIC_HW_H3_CH_HUB_ADR << 7) | QLCNIC_HW_QM_CRB_AGT_ADR)
266#define QLCNIC_HW_CRB_HUB_AGT_ADR_SQN0 \
267 ((QLCNIC_HW_H3_CH_HUB_ADR << 7) | QLCNIC_HW_SQG0_CRB_AGT_ADR)
268#define QLCNIC_HW_CRB_HUB_AGT_ADR_SQN1 \
269 ((QLCNIC_HW_H3_CH_HUB_ADR << 7) | QLCNIC_HW_SQG1_CRB_AGT_ADR)
270#define QLCNIC_HW_CRB_HUB_AGT_ADR_SQN2 \
271 ((QLCNIC_HW_H3_CH_HUB_ADR << 7) | QLCNIC_HW_SQG2_CRB_AGT_ADR)
272#define QLCNIC_HW_CRB_HUB_AGT_ADR_SQN3 \
273 ((QLCNIC_HW_H3_CH_HUB_ADR << 7) | QLCNIC_HW_SQG3_CRB_AGT_ADR)
274#define QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX1 \
275 ((QLCNIC_HW_H3_CH_HUB_ADR << 7) | QLCNIC_HW_RPMX1_CRB_AGT_ADR)
276#define QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX5 \
277 ((QLCNIC_HW_H3_CH_HUB_ADR << 7) | QLCNIC_HW_RPMX5_CRB_AGT_ADR)
278#define QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX6 \
279 ((QLCNIC_HW_H3_CH_HUB_ADR << 7) | QLCNIC_HW_RPMX6_CRB_AGT_ADR)
280#define QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX8 \
281 ((QLCNIC_HW_H3_CH_HUB_ADR << 7) | QLCNIC_HW_RPMX8_CRB_AGT_ADR)
282#define QLCNIC_HW_CRB_HUB_AGT_ADR_CAS0 \
283 ((QLCNIC_HW_H3_CH_HUB_ADR << 7) | QLCNIC_HW_CAS0_CRB_AGT_ADR)
284#define QLCNIC_HW_CRB_HUB_AGT_ADR_CAS1 \
285 ((QLCNIC_HW_H3_CH_HUB_ADR << 7) | QLCNIC_HW_CAS1_CRB_AGT_ADR)
286#define QLCNIC_HW_CRB_HUB_AGT_ADR_CAS2 \
287 ((QLCNIC_HW_H3_CH_HUB_ADR << 7) | QLCNIC_HW_CAS2_CRB_AGT_ADR)
288#define QLCNIC_HW_CRB_HUB_AGT_ADR_CAS3 \
289 ((QLCNIC_HW_H3_CH_HUB_ADR << 7) | QLCNIC_HW_CAS3_CRB_AGT_ADR)
290
291#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGNI \
292 ((QLCNIC_HW_H4_CH_HUB_ADR << 7) | QLCNIC_HW_PEGNI_CRB_AGT_ADR)
293#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGND \
294 ((QLCNIC_HW_H4_CH_HUB_ADR << 7) | QLCNIC_HW_PEGND_CRB_AGT_ADR)
295#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGN0 \
296 ((QLCNIC_HW_H4_CH_HUB_ADR << 7) | QLCNIC_HW_PEGN0_CRB_AGT_ADR)
297#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGN1 \
298 ((QLCNIC_HW_H4_CH_HUB_ADR << 7) | QLCNIC_HW_PEGN1_CRB_AGT_ADR)
299#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGN2 \
300 ((QLCNIC_HW_H4_CH_HUB_ADR << 7) | QLCNIC_HW_PEGN2_CRB_AGT_ADR)
301#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGN3 \
302 ((QLCNIC_HW_H4_CH_HUB_ADR << 7) | QLCNIC_HW_PEGN3_CRB_AGT_ADR)
303#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGN4 \
304 ((QLCNIC_HW_H4_CH_HUB_ADR << 7) | QLCNIC_HW_PEGN4_CRB_AGT_ADR)
305#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGNC \
306 ((QLCNIC_HW_H4_CH_HUB_ADR << 7) | QLCNIC_HW_PEGNC_CRB_AGT_ADR)
307#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGR0 \
308 ((QLCNIC_HW_H4_CH_HUB_ADR << 7) | QLCNIC_HW_PEGR0_CRB_AGT_ADR)
309#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGR1 \
310 ((QLCNIC_HW_H4_CH_HUB_ADR << 7) | QLCNIC_HW_PEGR1_CRB_AGT_ADR)
311#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGR2 \
312 ((QLCNIC_HW_H4_CH_HUB_ADR << 7) | QLCNIC_HW_PEGR2_CRB_AGT_ADR)
313#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGR3 \
314 ((QLCNIC_HW_H4_CH_HUB_ADR << 7) | QLCNIC_HW_PEGR3_CRB_AGT_ADR)
315
316#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGSI \
317 ((QLCNIC_HW_H5_CH_HUB_ADR << 7) | QLCNIC_HW_PEGSI_CRB_AGT_ADR)
318#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGSD \
319 ((QLCNIC_HW_H5_CH_HUB_ADR << 7) | QLCNIC_HW_PEGSD_CRB_AGT_ADR)
320#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGS0 \
321 ((QLCNIC_HW_H5_CH_HUB_ADR << 7) | QLCNIC_HW_PEGS0_CRB_AGT_ADR)
322#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGS1 \
323 ((QLCNIC_HW_H5_CH_HUB_ADR << 7) | QLCNIC_HW_PEGS1_CRB_AGT_ADR)
324#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGS2 \
325 ((QLCNIC_HW_H5_CH_HUB_ADR << 7) | QLCNIC_HW_PEGS2_CRB_AGT_ADR)
326#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGS3 \
327 ((QLCNIC_HW_H5_CH_HUB_ADR << 7) | QLCNIC_HW_PEGS3_CRB_AGT_ADR)
328#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGSC \
329 ((QLCNIC_HW_H5_CH_HUB_ADR << 7) | QLCNIC_HW_PEGSC_CRB_AGT_ADR)
330
331#define QLCNIC_HW_CRB_HUB_AGT_ADR_CAM \
332 ((QLCNIC_HW_H6_CH_HUB_ADR << 7) | QLCNIC_HW_NCM_CRB_AGT_ADR)
333#define QLCNIC_HW_CRB_HUB_AGT_ADR_TIMR \
334 ((QLCNIC_HW_H6_CH_HUB_ADR << 7) | QLCNIC_HW_TMR_CRB_AGT_ADR)
335#define QLCNIC_HW_CRB_HUB_AGT_ADR_XDMA \
336 ((QLCNIC_HW_H6_CH_HUB_ADR << 7) | QLCNIC_HW_XDMA_CRB_AGT_ADR)
337#define QLCNIC_HW_CRB_HUB_AGT_ADR_SN \
338 ((QLCNIC_HW_H6_CH_HUB_ADR << 7) | QLCNIC_HW_SN_CRB_AGT_ADR)
339#define QLCNIC_HW_CRB_HUB_AGT_ADR_I2Q \
340 ((QLCNIC_HW_H6_CH_HUB_ADR << 7) | QLCNIC_HW_I2Q_CRB_AGT_ADR)
341#define QLCNIC_HW_CRB_HUB_AGT_ADR_ROMUSB \
342 ((QLCNIC_HW_H6_CH_HUB_ADR << 7) | QLCNIC_HW_ROMUSB_CRB_AGT_ADR)
343#define QLCNIC_HW_CRB_HUB_AGT_ADR_OCM0 \
344 ((QLCNIC_HW_H6_CH_HUB_ADR << 7) | QLCNIC_HW_OCM0_CRB_AGT_ADR)
345#define QLCNIC_HW_CRB_HUB_AGT_ADR_OCM1 \
346 ((QLCNIC_HW_H6_CH_HUB_ADR << 7) | QLCNIC_HW_OCM1_CRB_AGT_ADR)
347#define QLCNIC_HW_CRB_HUB_AGT_ADR_LPC \
348 ((QLCNIC_HW_H6_CH_HUB_ADR << 7) | QLCNIC_HW_LPC_CRB_AGT_ADR)
349
350#define QLCNIC_SRE_MISC (QLCNIC_CRB_SRE + 0x0002c)
351
352#define QLCNIC_I2Q_CLR_PCI_HI (QLCNIC_CRB_I2Q + 0x00034)
353
354#define ROMUSB_GLB (QLCNIC_CRB_ROMUSB + 0x00000)
355#define ROMUSB_ROM (QLCNIC_CRB_ROMUSB + 0x10000)
356
357#define QLCNIC_ROMUSB_GLB_STATUS (ROMUSB_GLB + 0x0004)
358#define QLCNIC_ROMUSB_GLB_SW_RESET (ROMUSB_GLB + 0x0008)
359#define QLCNIC_ROMUSB_GLB_PAD_GPIO_I (ROMUSB_GLB + 0x000c)
360#define QLCNIC_ROMUSB_GLB_CAS_RST (ROMUSB_GLB + 0x0038)
361#define QLCNIC_ROMUSB_GLB_TEST_MUX_SEL (ROMUSB_GLB + 0x0044)
362#define QLCNIC_ROMUSB_GLB_PEGTUNE_DONE (ROMUSB_GLB + 0x005c)
363#define QLCNIC_ROMUSB_GLB_CHIP_CLK_CTRL (ROMUSB_GLB + 0x00A8)
364
365#define QLCNIC_ROMUSB_GPIO(n) (ROMUSB_GLB + 0x60 + (4 * (n)))
366
367#define QLCNIC_ROMUSB_ROM_INSTR_OPCODE (ROMUSB_ROM + 0x0004)
368#define QLCNIC_ROMUSB_ROM_ADDRESS (ROMUSB_ROM + 0x0008)
369#define QLCNIC_ROMUSB_ROM_WDATA (ROMUSB_ROM + 0x000c)
370#define QLCNIC_ROMUSB_ROM_ABYTE_CNT (ROMUSB_ROM + 0x0010)
371#define QLCNIC_ROMUSB_ROM_DUMMY_BYTE_CNT (ROMUSB_ROM + 0x0014)
372#define QLCNIC_ROMUSB_ROM_RDATA (ROMUSB_ROM + 0x0018)
373
374/* Lock IDs for ROM lock */
375#define ROM_LOCK_DRIVER 0x0d417340
376
377/******************************************************************************
378*
379* Definitions specific to M25P flash
380*
381*******************************************************************************
382*/
383
384/* all are 1MB windows */
385
386#define QLCNIC_PCI_CRB_WINDOWSIZE 0x00100000
387#define QLCNIC_PCI_CRB_WINDOW(A) \
388 (QLCNIC_PCI_CRBSPACE + (A)*QLCNIC_PCI_CRB_WINDOWSIZE)
389
390#define QLCNIC_CRB_NIU QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_NIU)
391#define QLCNIC_CRB_SRE QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_SRE)
392#define QLCNIC_CRB_ROMUSB \
393 QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_ROMUSB)
394#define QLCNIC_CRB_I2Q QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_I2Q)
395#define QLCNIC_CRB_I2C0 QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_I2C0)
396#define QLCNIC_CRB_SMB QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_SMB)
397#define QLCNIC_CRB_MAX QLCNIC_PCI_CRB_WINDOW(64)
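/*
 * Worked example: with QLCNIC_PCI_CRBSPACE == 0x06000000 and 1MB windows,
 * the NIU window (QLCNIC_HW_PX_MAP_CRB_NIU == 6) starts at
 * 0x06000000 + 6 * 0x00100000 == 0x06600000.
 */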
398
399#define QLCNIC_CRB_PCIX_HOST QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_PH)
400#define QLCNIC_CRB_PCIX_HOST2 QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_PH2)
401#define QLCNIC_CRB_PEG_NET_0 QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_PGN0)
402#define QLCNIC_CRB_PEG_NET_1 QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_PGN1)
403#define QLCNIC_CRB_PEG_NET_2 QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_PGN2)
404#define QLCNIC_CRB_PEG_NET_3 QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_PGN3)
405#define QLCNIC_CRB_PEG_NET_4 QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_SQS2)
406#define QLCNIC_CRB_PEG_NET_D QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_PGND)
407#define QLCNIC_CRB_PEG_NET_I QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_PGNI)
408#define QLCNIC_CRB_DDR_NET QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_MN)
409#define QLCNIC_CRB_QDR_NET QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_SN)
410
411#define QLCNIC_CRB_PCIX_MD QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_PS)
412#define QLCNIC_CRB_PCIE QLCNIC_CRB_PCIX_MD
413
414#define ISR_INT_VECTOR (QLCNIC_PCIX_PS_REG(PCIX_INT_VECTOR))
415#define ISR_INT_MASK (QLCNIC_PCIX_PS_REG(PCIX_INT_MASK))
416#define ISR_INT_MASK_SLOW (QLCNIC_PCIX_PS_REG(PCIX_INT_MASK))
417#define ISR_INT_TARGET_STATUS (QLCNIC_PCIX_PS_REG(PCIX_TARGET_STATUS))
418#define ISR_INT_TARGET_MASK (QLCNIC_PCIX_PS_REG(PCIX_TARGET_MASK))
419#define ISR_INT_TARGET_STATUS_F1 (QLCNIC_PCIX_PS_REG(PCIX_TARGET_STATUS_F1))
420#define ISR_INT_TARGET_MASK_F1 (QLCNIC_PCIX_PS_REG(PCIX_TARGET_MASK_F1))
421#define ISR_INT_TARGET_STATUS_F2 (QLCNIC_PCIX_PS_REG(PCIX_TARGET_STATUS_F2))
422#define ISR_INT_TARGET_MASK_F2 (QLCNIC_PCIX_PS_REG(PCIX_TARGET_MASK_F2))
423#define ISR_INT_TARGET_STATUS_F3 (QLCNIC_PCIX_PS_REG(PCIX_TARGET_STATUS_F3))
424#define ISR_INT_TARGET_MASK_F3 (QLCNIC_PCIX_PS_REG(PCIX_TARGET_MASK_F3))
425#define ISR_INT_TARGET_STATUS_F4 (QLCNIC_PCIX_PS_REG(PCIX_TARGET_STATUS_F4))
426#define ISR_INT_TARGET_MASK_F4 (QLCNIC_PCIX_PS_REG(PCIX_TARGET_MASK_F4))
427#define ISR_INT_TARGET_STATUS_F5 (QLCNIC_PCIX_PS_REG(PCIX_TARGET_STATUS_F5))
428#define ISR_INT_TARGET_MASK_F5 (QLCNIC_PCIX_PS_REG(PCIX_TARGET_MASK_F5))
429#define ISR_INT_TARGET_STATUS_F6 (QLCNIC_PCIX_PS_REG(PCIX_TARGET_STATUS_F6))
430#define ISR_INT_TARGET_MASK_F6 (QLCNIC_PCIX_PS_REG(PCIX_TARGET_MASK_F6))
431#define ISR_INT_TARGET_STATUS_F7 (QLCNIC_PCIX_PS_REG(PCIX_TARGET_STATUS_F7))
432#define ISR_INT_TARGET_MASK_F7 (QLCNIC_PCIX_PS_REG(PCIX_TARGET_MASK_F7))
433
434#define QLCNIC_PCI_MN_2M (0)
435#define QLCNIC_PCI_MS_2M (0x80000)
436#define QLCNIC_PCI_OCM0_2M (0x000c0000UL)
437#define QLCNIC_PCI_CRBSPACE (0x06000000UL)
438#define QLCNIC_PCI_2MB_SIZE (0x00200000UL)
439#define QLCNIC_PCI_CAMQM_2M_BASE (0x000ff800UL)
440#define QLCNIC_PCI_CAMQM_2M_END (0x04800800UL)
441
442#define QLCNIC_CRB_CAM QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_CAM)
443
444#define QLCNIC_ADDR_DDR_NET (0x0000000000000000ULL)
445#define QLCNIC_ADDR_DDR_NET_MAX (0x000000000fffffffULL)
446#define QLCNIC_ADDR_OCM0 (0x0000000200000000ULL)
447#define QLCNIC_ADDR_OCM0_MAX (0x00000002000fffffULL)
448#define QLCNIC_ADDR_OCM1 (0x0000000200400000ULL)
449#define QLCNIC_ADDR_OCM1_MAX (0x00000002004fffffULL)
450#define QLCNIC_ADDR_QDR_NET (0x0000000300000000ULL)
451#define QLCNIC_ADDR_QDR_NET_MAX_P3 (0x0000000303ffffffULL)
452
453/*
454 * Register offsets for MN
455 */
456#define QLCNIC_MIU_CONTROL (0x000)
457#define QLCNIC_MIU_MN_CONTROL (QLCNIC_CRB_DDR_NET+QLCNIC_MIU_CONTROL)
458
459/* 200ms delay in each loop */
460#define QLCNIC_NIU_PHY_WAITLEN 200000
461/* 10 seconds before we give up */
462#define QLCNIC_NIU_PHY_WAITMAX 50
463#define QLCNIC_NIU_MAX_GBE_PORTS 4
464#define QLCNIC_NIU_MAX_XG_PORTS 2
465
466#define QLCNIC_NIU_MODE (QLCNIC_CRB_NIU + 0x00000)
467#define QLCNIC_NIU_GB_PAUSE_CTL (QLCNIC_CRB_NIU + 0x0030c)
468#define QLCNIC_NIU_XG_PAUSE_CTL (QLCNIC_CRB_NIU + 0x00098)
469
470#define QLCNIC_NIU_GB_MAC_CONFIG_0(I) \
471 (QLCNIC_CRB_NIU + 0x30000 + (I)*0x10000)
472#define QLCNIC_NIU_GB_MAC_CONFIG_1(I) \
473 (QLCNIC_CRB_NIU + 0x30004 + (I)*0x10000)
474
475
476#define TEST_AGT_CTRL (0x00)
477
478#define TA_CTL_START 1
479#define TA_CTL_ENABLE 2
480#define TA_CTL_WRITE 4
481#define TA_CTL_BUSY 8
482
483/*
484 * Register offsets for MN
485 */
486#define MIU_TEST_AGT_BASE (0x90)
487
488#define MIU_TEST_AGT_ADDR_LO (0x04)
489#define MIU_TEST_AGT_ADDR_HI (0x08)
490#define MIU_TEST_AGT_WRDATA_LO (0x10)
491#define MIU_TEST_AGT_WRDATA_HI (0x14)
492#define MIU_TEST_AGT_WRDATA_UPPER_LO (0x20)
493#define MIU_TEST_AGT_WRDATA_UPPER_HI (0x24)
494#define MIU_TEST_AGT_WRDATA(i) (0x10+(0x10*((i)>>1))+(4*((i)&1)))
495#define MIU_TEST_AGT_RDDATA_LO (0x18)
496#define MIU_TEST_AGT_RDDATA_HI (0x1c)
497#define MIU_TEST_AGT_RDDATA_UPPER_LO (0x28)
498#define MIU_TEST_AGT_RDDATA_UPPER_HI (0x2c)
499#define MIU_TEST_AGT_RDDATA(i) (0x18+(0x10*((i)>>1))+(4*((i)&1)))
500
501#define MIU_TEST_AGT_ADDR_MASK 0xfffffff8
502#define MIU_TEST_AGT_UPPER_ADDR(off) (0)
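/*
 * MIU_TEST_AGT_WRDATA(i) just indexes the four write-data registers above:
 * i = 0..3 -> 0x10 (LO), 0x14 (HI), 0x20 (UPPER_LO), 0x24 (UPPER_HI);
 * MIU_TEST_AGT_RDDATA(i) maps the same way onto 0x18/0x1c/0x28/0x2c.
 */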
503
504/*
505 * Register offsets for MS
506 */
507#define SIU_TEST_AGT_BASE (0x60)
508
509#define SIU_TEST_AGT_ADDR_LO (0x04)
510#define SIU_TEST_AGT_ADDR_HI (0x18)
511#define SIU_TEST_AGT_WRDATA_LO (0x08)
512#define SIU_TEST_AGT_WRDATA_HI (0x0c)
513#define SIU_TEST_AGT_WRDATA(i) (0x08+(4*(i)))
514#define SIU_TEST_AGT_RDDATA_LO (0x10)
515#define SIU_TEST_AGT_RDDATA_HI (0x14)
516#define SIU_TEST_AGT_RDDATA(i) (0x10+(4*(i)))
517
518#define SIU_TEST_AGT_ADDR_MASK 0x3ffff8
519#define SIU_TEST_AGT_UPPER_ADDR(off) ((off)>>22)
520
521/* XG Link status */
522#define XG_LINK_UP 0x10
523#define XG_LINK_DOWN 0x20
524
525#define XG_LINK_UP_P3 0x01
526#define XG_LINK_DOWN_P3 0x02
527#define XG_LINK_STATE_P3_MASK 0xf
528#define XG_LINK_STATE_P3(pcifn, val) \
529 (((val) >> ((pcifn) * 4)) & XG_LINK_STATE_P3_MASK)
530
531#define P3_LINK_SPEED_MHZ 100
532#define P3_LINK_SPEED_MASK 0xff
533#define P3_LINK_SPEED_REG(pcifn) \
534 (CRB_PF_LINK_SPEED_1 + (((pcifn) / 4) * 4))
535#define P3_LINK_SPEED_VAL(pcifn, reg) \
536 (((reg) >> (8 * ((pcifn) & 0x3))) & P3_LINK_SPEED_MASK)
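/*
 * Worked example (register values are hypothetical): for PCI function 1 the
 * link state sits in bits [7:4] of CRB_XG_STATE_P3, so
 * XG_LINK_STATE_P3(1, 0x00000010) == XG_LINK_UP_P3.  Its speed sits in byte 1
 * of CRB_PF_LINK_SPEED_1; with reg == 0x00006400,
 * P3_LINK_SPEED_VAL(1, reg) == 100, i.e. 100 * P3_LINK_SPEED_MHZ = 10000 Mb/s.
 */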
537
538#define QLCNIC_CAM_RAM_BASE (QLCNIC_CRB_CAM + 0x02000)
539#define QLCNIC_CAM_RAM(reg) (QLCNIC_CAM_RAM_BASE + (reg))
540#define QLCNIC_FW_VERSION_MAJOR (QLCNIC_CAM_RAM(0x150))
541#define QLCNIC_FW_VERSION_MINOR (QLCNIC_CAM_RAM(0x154))
542#define QLCNIC_FW_VERSION_SUB (QLCNIC_CAM_RAM(0x158))
543#define QLCNIC_ROM_LOCK_ID (QLCNIC_CAM_RAM(0x100))
544#define QLCNIC_PHY_LOCK_ID (QLCNIC_CAM_RAM(0x120))
545#define QLCNIC_CRB_WIN_LOCK_ID (QLCNIC_CAM_RAM(0x124))
546
547#define NIC_CRB_BASE (QLCNIC_CAM_RAM(0x200))
548#define NIC_CRB_BASE_2 (QLCNIC_CAM_RAM(0x700))
549#define QLCNIC_REG(X) (NIC_CRB_BASE+(X))
550#define QLCNIC_REG_2(X) (NIC_CRB_BASE_2+(X))
551
552#define QLCNIC_CDRP_CRB_OFFSET (QLCNIC_REG(0x18))
553#define QLCNIC_ARG1_CRB_OFFSET (QLCNIC_REG(0x1c))
554#define QLCNIC_ARG2_CRB_OFFSET (QLCNIC_REG(0x20))
555#define QLCNIC_ARG3_CRB_OFFSET (QLCNIC_REG(0x24))
556#define QLCNIC_SIGN_CRB_OFFSET (QLCNIC_REG(0x28))
557
558#define CRB_CMDPEG_STATE (QLCNIC_REG(0x50))
559#define CRB_RCVPEG_STATE (QLCNIC_REG(0x13c))
560
561#define CRB_XG_STATE_P3 (QLCNIC_REG(0x98))
562#define CRB_PF_LINK_SPEED_1 (QLCNIC_REG(0xe8))
563#define CRB_PF_LINK_SPEED_2 (QLCNIC_REG(0xec))
564
565#define CRB_MPORT_MODE (QLCNIC_REG(0xc4))
566#define CRB_DMA_SHIFT (QLCNIC_REG(0xcc))
567
568#define CRB_TEMP_STATE (QLCNIC_REG(0x1b4))
569
570#define CRB_V2P_0 (QLCNIC_REG(0x290))
571#define CRB_V2P(port) (CRB_V2P_0+((port)*4))
572#define CRB_DRIVER_VERSION (QLCNIC_REG(0x2a0))
573
574#define CRB_SW_INT_MASK_0 (QLCNIC_REG(0x1d8))
575#define CRB_SW_INT_MASK_1 (QLCNIC_REG(0x1e0))
576#define CRB_SW_INT_MASK_2 (QLCNIC_REG(0x1e4))
577#define CRB_SW_INT_MASK_3 (QLCNIC_REG(0x1e8))
578
579#define CRB_FW_CAPABILITIES_1 (QLCNIC_CAM_RAM(0x128))
580#define CRB_MAC_BLOCK_START (QLCNIC_CAM_RAM(0x1c0))
581
582/*
583 * capabilities register, can be used to selectively enable/disable features
584 * for backward compatibility
585 */
586#define CRB_NIC_CAPABILITIES_HOST QLCNIC_REG(0x1a8)
587#define CRB_NIC_CAPABILITIES_FW QLCNIC_REG(0x1dc)
588#define CRB_NIC_MSI_MODE_HOST QLCNIC_REG(0x270)
589#define CRB_NIC_MSI_MODE_FW QLCNIC_REG(0x274)
590
591#define INTR_SCHEME_PERPORT 0x1
592#define MSI_MODE_MULTIFUNC 0x1
593
594/* used for ethtool tests */
595#define CRB_SCRATCHPAD_TEST QLCNIC_REG(0x280)
596
597/*
598 * CrbPortPhanCntrHi/Lo is used to pass the address of HostPhantomIndex, which
599 * can be read by the Phantom host to get producer/consumer indexes from
600 * Phantom/Casper. If it is not HOST_SHARED_MEMORY, then the following
601 * registers will be used for the addresses of the ring's shared memory
602 * on the Phantom.
603 */
604
605#define qlcnic_get_temp_val(x) ((x) >> 16)
606#define qlcnic_get_temp_state(x) ((x) & 0xffff)
607#define qlcnic_encode_temp(val, state) (((val) << 16) | (state))
608
609/*
610 * Temperature control.
611 */
612enum {
613 QLCNIC_TEMP_NORMAL = 0x1, /* Normal operating range */
614 QLCNIC_TEMP_WARN, /* Sound alert, temperature getting high */
615 QLCNIC_TEMP_PANIC /* Fatal error, hardware has shut down. */
616};
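/*
 * Worked example (register value is hypothetical): for x == 0x002d0001,
 * qlcnic_get_temp_val(x) == 45 (degrees C) and
 * qlcnic_get_temp_state(x) == QLCNIC_TEMP_NORMAL;
 * qlcnic_encode_temp(45, QLCNIC_TEMP_NORMAL) rebuilds the same word.
 */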
617
618/* Lock IDs for PHY lock */
619#define PHY_LOCK_DRIVER 0x44524956
620
621/* Used for PS PCI Memory access */
622#define PCIX_PS_OP_ADDR_LO (0x10000)
623/* via CRB (PS side only) */
624#define PCIX_PS_OP_ADDR_HI (0x10004)
625
626#define PCIX_INT_VECTOR (0x10100)
627#define PCIX_INT_MASK (0x10104)
628
629#define PCIX_OCM_WINDOW (0x10800)
630#define PCIX_OCM_WINDOW_REG(func) (PCIX_OCM_WINDOW + 0x20 * (func))
631
632#define PCIX_TARGET_STATUS (0x10118)
633#define PCIX_TARGET_STATUS_F1 (0x10160)
634#define PCIX_TARGET_STATUS_F2 (0x10164)
635#define PCIX_TARGET_STATUS_F3 (0x10168)
636#define PCIX_TARGET_STATUS_F4 (0x10360)
637#define PCIX_TARGET_STATUS_F5 (0x10364)
638#define PCIX_TARGET_STATUS_F6 (0x10368)
639#define PCIX_TARGET_STATUS_F7 (0x1036c)
640
641#define PCIX_TARGET_MASK (0x10128)
642#define PCIX_TARGET_MASK_F1 (0x10170)
643#define PCIX_TARGET_MASK_F2 (0x10174)
644#define PCIX_TARGET_MASK_F3 (0x10178)
645#define PCIX_TARGET_MASK_F4 (0x10370)
646#define PCIX_TARGET_MASK_F5 (0x10374)
647#define PCIX_TARGET_MASK_F6 (0x10378)
648#define PCIX_TARGET_MASK_F7 (0x1037c)
649
650#define PCIX_MSI_F(i) (0x13000+((i)*4))
651
652#define QLCNIC_PCIX_PH_REG(reg) (QLCNIC_CRB_PCIE + (reg))
653#define QLCNIC_PCIX_PS_REG(reg) (QLCNIC_CRB_PCIX_MD + (reg))
654#define QLCNIC_PCIE_REG(reg) (QLCNIC_CRB_PCIE + (reg))
655
656#define PCIE_SEM0_LOCK (0x1c000)
657#define PCIE_SEM0_UNLOCK (0x1c004)
658#define PCIE_SEM_LOCK(N) (PCIE_SEM0_LOCK + 8*(N))
659#define PCIE_SEM_UNLOCK(N) (PCIE_SEM0_UNLOCK + 8*(N))
660
661#define PCIE_SETUP_FUNCTION (0x12040)
662#define PCIE_SETUP_FUNCTION2 (0x12048)
663#define PCIE_MISCCFG_RC (0x1206c)
664#define PCIE_TGT_SPLIT_CHICKEN (0x12080)
665#define PCIE_CHICKEN3 (0x120c8)
666
667#define ISR_INT_STATE_REG (QLCNIC_PCIX_PS_REG(PCIE_MISCCFG_RC))
668#define PCIE_MAX_MASTER_SPLIT (0x14048)
669
670#define QLCNIC_PORT_MODE_NONE 0
671#define QLCNIC_PORT_MODE_XG 1
672#define QLCNIC_PORT_MODE_GB 2
673#define QLCNIC_PORT_MODE_802_3_AP 3
674#define QLCNIC_PORT_MODE_AUTO_NEG 4
675#define QLCNIC_PORT_MODE_AUTO_NEG_1G 5
676#define QLCNIC_PORT_MODE_AUTO_NEG_XG 6
677#define QLCNIC_PORT_MODE_ADDR (QLCNIC_CAM_RAM(0x24))
678#define QLCNIC_WOL_PORT_MODE (QLCNIC_CAM_RAM(0x198))
679
680#define QLCNIC_WOL_CONFIG_NV (QLCNIC_CAM_RAM(0x184))
681#define QLCNIC_WOL_CONFIG (QLCNIC_CAM_RAM(0x188))
682
683#define QLCNIC_PEG_TUNE_MN_PRESENT 0x1
684#define QLCNIC_PEG_TUNE_CAPABILITY (QLCNIC_CAM_RAM(0x02c))
685
686#define QLCNIC_DMA_WATCHDOG_CTRL (QLCNIC_CAM_RAM(0x14))
687#define QLCNIC_PEG_ALIVE_COUNTER (QLCNIC_CAM_RAM(0xb0))
688#define QLCNIC_PEG_HALT_STATUS1 (QLCNIC_CAM_RAM(0xa8))
689#define QLCNIC_PEG_HALT_STATUS2 (QLCNIC_CAM_RAM(0xac))
690#define QLCNIC_CRB_DEV_REF_COUNT (QLCNIC_CAM_RAM(0x138))
691#define QLCNIC_CRB_DEV_STATE (QLCNIC_CAM_RAM(0x140))
692
693#define QLCNIC_CRB_DRV_STATE (QLCNIC_CAM_RAM(0x144))
694#define QLCNIC_CRB_DRV_SCRATCH (QLCNIC_CAM_RAM(0x148))
695#define QLCNIC_CRB_DEV_PARTITION_INFO (QLCNIC_CAM_RAM(0x14c))
696#define QLCNIC_CRB_DRV_IDC_VER (QLCNIC_CAM_RAM(0x14c))
697
698 /* Device State */
699#define QLCNIC_DEV_COLD 1
700#define QLCNIC_DEV_INITALIZING 2
701#define QLCNIC_DEV_READY 3
702#define QLCNIC_DEV_NEED_RESET 4
703#define QLCNIC_DEV_NEED_QUISCENT 5
704#define QLCNIC_DEV_FAILED 6
705
706#define QLCNIC_RCODE_DRIVER_INFO 0x20000000
707#define QLCNIC_RCODE_DRIVER_CAN_RELOAD 0x40000000
708#define QLCNIC_RCODE_FATAL_ERROR 0x80000000
709#define QLCNIC_FWERROR_PEGNUM(code) ((code) & 0xff)
710#define QLCNIC_FWERROR_CODE(code)	(((code) >> 8) & 0xfffff)
711
712#define FW_POLL_DELAY (2 * HZ)
713#define FW_FAIL_THRESH 3
714#define FW_POLL_THRESH 10
715
716#define ISR_MSI_INT_TRIGGER(FUNC) (QLCNIC_PCIX_PS_REG(PCIX_MSI_F(FUNC)))
717#define ISR_LEGACY_INT_TRIGGERED(VAL) (((VAL) & 0x300) == 0x200)
718
719/*
720 * PCI Interrupt Vector Values.
721 */
722#define PCIX_INT_VECTOR_BIT_F0 0x0080
723#define PCIX_INT_VECTOR_BIT_F1 0x0100
724#define PCIX_INT_VECTOR_BIT_F2 0x0200
725#define PCIX_INT_VECTOR_BIT_F3 0x0400
726#define PCIX_INT_VECTOR_BIT_F4 0x0800
727#define PCIX_INT_VECTOR_BIT_F5 0x1000
728#define PCIX_INT_VECTOR_BIT_F6 0x2000
729#define PCIX_INT_VECTOR_BIT_F7 0x4000
730
731struct qlcnic_legacy_intr_set {
732 u32 int_vec_bit;
733 u32 tgt_status_reg;
734 u32 tgt_mask_reg;
735 u32 pci_int_reg;
736};
737
738#define QLCNIC_LEGACY_INTR_CONFIG \
739{ \
740 { \
741 .int_vec_bit = PCIX_INT_VECTOR_BIT_F0, \
742 .tgt_status_reg = ISR_INT_TARGET_STATUS, \
743 .tgt_mask_reg = ISR_INT_TARGET_MASK, \
744 .pci_int_reg = ISR_MSI_INT_TRIGGER(0) }, \
745 \
746 { \
747 .int_vec_bit = PCIX_INT_VECTOR_BIT_F1, \
748 .tgt_status_reg = ISR_INT_TARGET_STATUS_F1, \
749 .tgt_mask_reg = ISR_INT_TARGET_MASK_F1, \
750 .pci_int_reg = ISR_MSI_INT_TRIGGER(1) }, \
751 \
752 { \
753 .int_vec_bit = PCIX_INT_VECTOR_BIT_F2, \
754 .tgt_status_reg = ISR_INT_TARGET_STATUS_F2, \
755 .tgt_mask_reg = ISR_INT_TARGET_MASK_F2, \
756 .pci_int_reg = ISR_MSI_INT_TRIGGER(2) }, \
757 \
758 { \
759 .int_vec_bit = PCIX_INT_VECTOR_BIT_F3, \
760 .tgt_status_reg = ISR_INT_TARGET_STATUS_F3, \
761 .tgt_mask_reg = ISR_INT_TARGET_MASK_F3, \
762 .pci_int_reg = ISR_MSI_INT_TRIGGER(3) }, \
763 \
764 { \
765 .int_vec_bit = PCIX_INT_VECTOR_BIT_F4, \
766 .tgt_status_reg = ISR_INT_TARGET_STATUS_F4, \
767 .tgt_mask_reg = ISR_INT_TARGET_MASK_F4, \
768 .pci_int_reg = ISR_MSI_INT_TRIGGER(4) }, \
769 \
770 { \
771 .int_vec_bit = PCIX_INT_VECTOR_BIT_F5, \
772 .tgt_status_reg = ISR_INT_TARGET_STATUS_F5, \
773 .tgt_mask_reg = ISR_INT_TARGET_MASK_F5, \
774 .pci_int_reg = ISR_MSI_INT_TRIGGER(5) }, \
775 \
776 { \
777 .int_vec_bit = PCIX_INT_VECTOR_BIT_F6, \
778 .tgt_status_reg = ISR_INT_TARGET_STATUS_F6, \
779 .tgt_mask_reg = ISR_INT_TARGET_MASK_F6, \
780 .pci_int_reg = ISR_MSI_INT_TRIGGER(6) }, \
781 \
782 { \
783 .int_vec_bit = PCIX_INT_VECTOR_BIT_F7, \
784 .tgt_status_reg = ISR_INT_TARGET_STATUS_F7, \
785 .tgt_mask_reg = ISR_INT_TARGET_MASK_F7, \
786 .pci_int_reg = ISR_MSI_INT_TRIGGER(7) }, \
787}
788
789/* NIU REGS */
790
791#define _qlcnic_crb_get_bit(var, bit)	(((var) >> (bit)) & 0x1)
792
793/*
794 * NIU GB MAC Config Register 0 (applies to GB0, GB1, GB2, GB3)
795 *
796 * Bit 0 : enable_tx => 1:enable frame xmit, 0:disable
797 * Bit 1 : tx_synced => R/O: xmit enable synched to xmit stream
798 * Bit 2 : enable_rx => 1:enable frame recv, 0:disable
799 * Bit 3 : rx_synced => R/O: recv enable synched to recv stream
800 * Bit 4 : tx_flowctl => 1:enable pause frame generation, 0:disable
801 * Bit 5 : rx_flowctl => 1:act on recv'd pause frames, 0:ignore
802 * Bit 8 : loopback => 1:loop MAC xmits to MAC recvs, 0:normal
803 * Bit 16: tx_reset_pb => 1:reset frame xmit protocol blk, 0:no-op
804 * Bit 17: rx_reset_pb => 1:reset frame recv protocol blk, 0:no-op
805 * Bit 18: tx_reset_mac => 1:reset data/ctl multiplexer blk, 0:no-op
806 * Bit 19: rx_reset_mac => 1:reset ctl frames & timers blk, 0:no-op
807 * Bit 31: soft_reset => 1:reset the MAC and the SERDES, 0:no-op
808 */
809#define qlcnic_gb_rx_flowctl(config_word) \
810 ((config_word) |= 1 << 5)
811#define qlcnic_gb_get_rx_flowctl(config_word) \
812 _qlcnic_crb_get_bit((config_word), 5)
813#define qlcnic_gb_unset_rx_flowctl(config_word) \
814 ((config_word) &= ~(1 << 5))
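/*
 * Usage sketch (illustrative only; the port number is an example): a typical
 * read-modify-write of the rx_flowctl bit on GbE port 0 would look like
 *
 *   u32 mac_cfg = QLCRD32(adapter, QLCNIC_NIU_GB_MAC_CONFIG_0(0));
 *   qlcnic_gb_rx_flowctl(mac_cfg);      (honour received pause frames)
 *   QLCWR32(adapter, QLCNIC_NIU_GB_MAC_CONFIG_0(0), mac_cfg);
 */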
815
816/*
817 * NIU GB Pause Ctl Register
818 */
819
820#define qlcnic_gb_set_gb0_mask(config_word) \
821 ((config_word) |= 1 << 0)
822#define qlcnic_gb_set_gb1_mask(config_word) \
823 ((config_word) |= 1 << 2)
824#define qlcnic_gb_set_gb2_mask(config_word) \
825 ((config_word) |= 1 << 4)
826#define qlcnic_gb_set_gb3_mask(config_word) \
827 ((config_word) |= 1 << 6)
828
829#define qlcnic_gb_get_gb0_mask(config_word) \
830 _qlcnic_crb_get_bit((config_word), 0)
831#define qlcnic_gb_get_gb1_mask(config_word) \
832 _qlcnic_crb_get_bit((config_word), 2)
833#define qlcnic_gb_get_gb2_mask(config_word) \
834 _qlcnic_crb_get_bit((config_word), 4)
835#define qlcnic_gb_get_gb3_mask(config_word) \
836 _qlcnic_crb_get_bit((config_word), 6)
837
838#define qlcnic_gb_unset_gb0_mask(config_word) \
839 ((config_word) &= ~(1 << 0))
840#define qlcnic_gb_unset_gb1_mask(config_word) \
841 ((config_word) &= ~(1 << 2))
842#define qlcnic_gb_unset_gb2_mask(config_word) \
843 ((config_word) &= ~(1 << 4))
844#define qlcnic_gb_unset_gb3_mask(config_word) \
845 ((config_word) &= ~(1 << 6))
846
847/*
848 * NIU XG Pause Ctl Register
849 *
850 * Bit 0 : xg0_mask => 1:disable tx pause frames
851 * Bit 1 : xg0_request => 1:request single pause frame
852 * Bit 2 : xg0_on_off => 1:request is pause on, 0:off
853 * Bit 3 : xg1_mask => 1:disable tx pause frames
854 * Bit 4 : xg1_request => 1:request single pause frame
855 * Bit 5 : xg1_on_off => 1:request is pause on, 0:off
856 */
857
858#define qlcnic_xg_set_xg0_mask(config_word) \
859 ((config_word) |= 1 << 0)
860#define qlcnic_xg_set_xg1_mask(config_word) \
861 ((config_word) |= 1 << 3)
862
863#define qlcnic_xg_get_xg0_mask(config_word) \
864 _qlcnic_crb_get_bit((config_word), 0)
865#define qlcnic_xg_get_xg1_mask(config_word) \
866 _qlcnic_crb_get_bit((config_word), 3)
867
868#define qlcnic_xg_unset_xg0_mask(config_word) \
869 ((config_word) &= ~(1 << 0))
870#define qlcnic_xg_unset_xg1_mask(config_word) \
871 ((config_word) &= ~(1 << 3))
872
884/*
885 * PHY-Specific MII control/status registers.
886 */
887#define QLCNIC_NIU_GB_MII_MGMT_ADDR_AUTONEG 4
888#define QLCNIC_NIU_GB_MII_MGMT_ADDR_PHY_STATUS 17
889
890/*
891 * PHY-Specific Status Register (reg 17).
892 *
893 * Bit 0 : jabber => 1:jabber detected, 0:not
894 * Bit 1 : polarity => 1:polarity reversed, 0:normal
895 * Bit 2 : recvpause => 1:receive pause enabled, 0:disabled
896 * Bit 3 : xmitpause => 1:transmit pause enabled, 0:disabled
897 * Bit 4 : energydetect => 1:sleep, 0:active
898 * Bit 5 : downshift => 1:downshift, 0:no downshift
899 * Bit 6 : crossover => 1:MDIX (crossover), 0:MDI (no crossover)
900 * Bits 7-9 : cablelen => not valid in 10Mb/s mode
901 * 0:<50m, 1:50-80m, 2:80-110m, 3:110-140m, 4:>140m
902 * Bit 10 : link => 1:link up, 0:link down
903 * Bit 11 : resolved => 1:speed and duplex resolved, 0:not yet
904 * Bit 12 : pagercvd => 1:page received, 0:page not received
905 * Bit 13 : duplex => 1:full duplex, 0:half duplex
906 * Bits 14-15 : speed => 0:10Mb/s, 1:100Mb/s, 2:1000Mb/s, 3:rsvd
907 */
908
909#define qlcnic_get_phy_speed(config_word) (((config_word) >> 14) & 0x03)
910
911#define qlcnic_set_phy_speed(config_word, val) \
912 ((config_word) |= ((val & 0x03) << 14))
913#define qlcnic_set_phy_duplex(config_word) \
914 ((config_word) |= 1 << 13)
915#define qlcnic_clear_phy_duplex(config_word) \
916 ((config_word) &= ~(1 << 13))
917
918#define qlcnic_get_phy_link(config_word) \
919 _qlcnic_crb_get_bit(config_word, 10)
920#define qlcnic_get_phy_duplex(config_word) \
921 _qlcnic_crb_get_bit(config_word, 13)
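/*
 * Worked example (status word is hypothetical): for config_word == 0xac00
 * (speed bits [15:14] == 2, duplex bit 13, resolved bit 11 and link bit 10
 * set), qlcnic_get_phy_speed() returns 2 (1000 Mb/s),
 * qlcnic_get_phy_duplex() returns 1 (full duplex) and
 * qlcnic_get_phy_link() returns 1 (link up).
 */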
922
923#define QLCNIC_NIU_NON_PROMISC_MODE 0
924#define QLCNIC_NIU_PROMISC_MODE 1
925#define QLCNIC_NIU_ALLMULTI_MODE 2
926
927struct crb_128M_2M_sub_block_map {
928 unsigned valid;
929 unsigned start_128M;
930 unsigned end_128M;
931 unsigned start_2M;
932};
933
934struct crb_128M_2M_block_map{
935 struct crb_128M_2M_sub_block_map sub_block[16];
936};
937#endif /* __QLCNIC_HDR_H_ */
diff --git a/drivers/net/qlcnic/qlcnic_hw.c b/drivers/net/qlcnic/qlcnic_hw.c
new file mode 100644
index 00000000000..dc6cd69d6d9
--- /dev/null
+++ b/drivers/net/qlcnic/qlcnic_hw.c
@@ -0,0 +1,1275 @@
1/*
2 * Copyright (C) 2009 - QLogic Corporation.
3 * All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License
7 * as published by the Free Software Foundation; either version 2
8 * of the License, or (at your option) any later version.
9 *
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 59 Temple Place - Suite 330, Boston,
18 * MA 02111-1307, USA.
19 *
20 * The full GNU General Public License is included in this distribution
21 * in the file called "COPYING".
22 *
23 */
24
25#include "qlcnic.h"
26
27#include <net/ip.h>
28
29#define MASK(n) ((1ULL<<(n))-1)
30#define OCM_WIN_P3P(addr) (addr & 0xffc0000)
31
32#define GET_MEM_OFFS_2M(addr) (addr & MASK(18))
33
34#define CRB_BLK(off) ((off >> 20) & 0x3f)
35#define CRB_SUBBLK(off) ((off >> 16) & 0xf)
36#define CRB_WINDOW_2M (0x130060)
37#define CRB_HI(off) ((crb_hub_agt[CRB_BLK(off)] << 20) | ((off) & 0xf0000))
38#define CRB_INDIRECT_2M (0x1e0000UL)
39
40
41#ifndef readq
42static inline u64 readq(void __iomem *addr)
43{
44 return readl(addr) | (((u64) readl(addr + 4)) << 32LL);
45}
46#endif
47
48#ifndef writeq
49static inline void writeq(u64 val, void __iomem *addr)
50{
51 writel(((u32) (val)), (addr));
52 writel(((u32) (val >> 32)), (addr + 4));
53}
54#endif
55
56#define ADDR_IN_RANGE(addr, low, high) \
57 (((addr) < (high)) && ((addr) >= (low)))
58
59#define PCI_OFFSET_FIRST_RANGE(adapter, off) \
60 ((adapter)->ahw.pci_base0 + (off))
61
62static void __iomem *pci_base_offset(struct qlcnic_adapter *adapter,
63 unsigned long off)
64{
65 if (ADDR_IN_RANGE(off, FIRST_PAGE_GROUP_START, FIRST_PAGE_GROUP_END))
66 return PCI_OFFSET_FIRST_RANGE(adapter, off);
67
68 return NULL;
69}
70
71static const struct crb_128M_2M_block_map
72crb_128M_2M_map[64] __cacheline_aligned_in_smp = {
73 {{{0, 0, 0, 0} } }, /* 0: PCI */
74 {{{1, 0x0100000, 0x0102000, 0x120000}, /* 1: PCIE */
75 {1, 0x0110000, 0x0120000, 0x130000},
76 {1, 0x0120000, 0x0122000, 0x124000},
77 {1, 0x0130000, 0x0132000, 0x126000},
78 {1, 0x0140000, 0x0142000, 0x128000},
79 {1, 0x0150000, 0x0152000, 0x12a000},
80 {1, 0x0160000, 0x0170000, 0x110000},
81 {1, 0x0170000, 0x0172000, 0x12e000},
82 {0, 0x0000000, 0x0000000, 0x000000},
83 {0, 0x0000000, 0x0000000, 0x000000},
84 {0, 0x0000000, 0x0000000, 0x000000},
85 {0, 0x0000000, 0x0000000, 0x000000},
86 {0, 0x0000000, 0x0000000, 0x000000},
87 {0, 0x0000000, 0x0000000, 0x000000},
88 {1, 0x01e0000, 0x01e0800, 0x122000},
89 {0, 0x0000000, 0x0000000, 0x000000} } },
90 {{{1, 0x0200000, 0x0210000, 0x180000} } },/* 2: MN */
91 {{{0, 0, 0, 0} } }, /* 3: */
92 {{{1, 0x0400000, 0x0401000, 0x169000} } },/* 4: P2NR1 */
93 {{{1, 0x0500000, 0x0510000, 0x140000} } },/* 5: SRE */
94 {{{1, 0x0600000, 0x0610000, 0x1c0000} } },/* 6: NIU */
95 {{{1, 0x0700000, 0x0704000, 0x1b8000} } },/* 7: QM */
96 {{{1, 0x0800000, 0x0802000, 0x170000}, /* 8: SQM0 */
97 {0, 0x0000000, 0x0000000, 0x000000},
98 {0, 0x0000000, 0x0000000, 0x000000},
99 {0, 0x0000000, 0x0000000, 0x000000},
100 {0, 0x0000000, 0x0000000, 0x000000},
101 {0, 0x0000000, 0x0000000, 0x000000},
102 {0, 0x0000000, 0x0000000, 0x000000},
103 {0, 0x0000000, 0x0000000, 0x000000},
104 {0, 0x0000000, 0x0000000, 0x000000},
105 {0, 0x0000000, 0x0000000, 0x000000},
106 {0, 0x0000000, 0x0000000, 0x000000},
107 {0, 0x0000000, 0x0000000, 0x000000},
108 {0, 0x0000000, 0x0000000, 0x000000},
109 {0, 0x0000000, 0x0000000, 0x000000},
110 {0, 0x0000000, 0x0000000, 0x000000},
111 {1, 0x08f0000, 0x08f2000, 0x172000} } },
112 {{{1, 0x0900000, 0x0902000, 0x174000}, /* 9: SQM1*/
113 {0, 0x0000000, 0x0000000, 0x000000},
114 {0, 0x0000000, 0x0000000, 0x000000},
115 {0, 0x0000000, 0x0000000, 0x000000},
116 {0, 0x0000000, 0x0000000, 0x000000},
117 {0, 0x0000000, 0x0000000, 0x000000},
118 {0, 0x0000000, 0x0000000, 0x000000},
119 {0, 0x0000000, 0x0000000, 0x000000},
120 {0, 0x0000000, 0x0000000, 0x000000},
121 {0, 0x0000000, 0x0000000, 0x000000},
122 {0, 0x0000000, 0x0000000, 0x000000},
123 {0, 0x0000000, 0x0000000, 0x000000},
124 {0, 0x0000000, 0x0000000, 0x000000},
125 {0, 0x0000000, 0x0000000, 0x000000},
126 {0, 0x0000000, 0x0000000, 0x000000},
127 {1, 0x09f0000, 0x09f2000, 0x176000} } },
128 {{{0, 0x0a00000, 0x0a02000, 0x178000}, /* 10: SQM2*/
129 {0, 0x0000000, 0x0000000, 0x000000},
130 {0, 0x0000000, 0x0000000, 0x000000},
131 {0, 0x0000000, 0x0000000, 0x000000},
132 {0, 0x0000000, 0x0000000, 0x000000},
133 {0, 0x0000000, 0x0000000, 0x000000},
134 {0, 0x0000000, 0x0000000, 0x000000},
135 {0, 0x0000000, 0x0000000, 0x000000},
136 {0, 0x0000000, 0x0000000, 0x000000},
137 {0, 0x0000000, 0x0000000, 0x000000},
138 {0, 0x0000000, 0x0000000, 0x000000},
139 {0, 0x0000000, 0x0000000, 0x000000},
140 {0, 0x0000000, 0x0000000, 0x000000},
141 {0, 0x0000000, 0x0000000, 0x000000},
142 {0, 0x0000000, 0x0000000, 0x000000},
143 {1, 0x0af0000, 0x0af2000, 0x17a000} } },
144 {{{0, 0x0b00000, 0x0b02000, 0x17c000}, /* 11: SQM3*/
145 {0, 0x0000000, 0x0000000, 0x000000},
146 {0, 0x0000000, 0x0000000, 0x000000},
147 {0, 0x0000000, 0x0000000, 0x000000},
148 {0, 0x0000000, 0x0000000, 0x000000},
149 {0, 0x0000000, 0x0000000, 0x000000},
150 {0, 0x0000000, 0x0000000, 0x000000},
151 {0, 0x0000000, 0x0000000, 0x000000},
152 {0, 0x0000000, 0x0000000, 0x000000},
153 {0, 0x0000000, 0x0000000, 0x000000},
154 {0, 0x0000000, 0x0000000, 0x000000},
155 {0, 0x0000000, 0x0000000, 0x000000},
156 {0, 0x0000000, 0x0000000, 0x000000},
157 {0, 0x0000000, 0x0000000, 0x000000},
158 {0, 0x0000000, 0x0000000, 0x000000},
159 {1, 0x0bf0000, 0x0bf2000, 0x17e000} } },
160 {{{1, 0x0c00000, 0x0c04000, 0x1d4000} } },/* 12: I2Q */
161 {{{1, 0x0d00000, 0x0d04000, 0x1a4000} } },/* 13: TMR */
162 {{{1, 0x0e00000, 0x0e04000, 0x1a0000} } },/* 14: ROMUSB */
163 {{{1, 0x0f00000, 0x0f01000, 0x164000} } },/* 15: PEG4 */
164 {{{0, 0x1000000, 0x1004000, 0x1a8000} } },/* 16: XDMA */
165 {{{1, 0x1100000, 0x1101000, 0x160000} } },/* 17: PEG0 */
166 {{{1, 0x1200000, 0x1201000, 0x161000} } },/* 18: PEG1 */
167 {{{1, 0x1300000, 0x1301000, 0x162000} } },/* 19: PEG2 */
168 {{{1, 0x1400000, 0x1401000, 0x163000} } },/* 20: PEG3 */
169 {{{1, 0x1500000, 0x1501000, 0x165000} } },/* 21: P2ND */
170 {{{1, 0x1600000, 0x1601000, 0x166000} } },/* 22: P2NI */
171 {{{0, 0, 0, 0} } }, /* 23: */
172 {{{0, 0, 0, 0} } }, /* 24: */
173 {{{0, 0, 0, 0} } }, /* 25: */
174 {{{0, 0, 0, 0} } }, /* 26: */
175 {{{0, 0, 0, 0} } }, /* 27: */
176 {{{0, 0, 0, 0} } }, /* 28: */
177 {{{1, 0x1d00000, 0x1d10000, 0x190000} } },/* 29: MS */
178 {{{1, 0x1e00000, 0x1e01000, 0x16a000} } },/* 30: P2NR2 */
179 {{{1, 0x1f00000, 0x1f10000, 0x150000} } },/* 31: EPG */
180 {{{0} } }, /* 32: PCI */
181 {{{1, 0x2100000, 0x2102000, 0x120000}, /* 33: PCIE */
182 {1, 0x2110000, 0x2120000, 0x130000},
183 {1, 0x2120000, 0x2122000, 0x124000},
184 {1, 0x2130000, 0x2132000, 0x126000},
185 {1, 0x2140000, 0x2142000, 0x128000},
186 {1, 0x2150000, 0x2152000, 0x12a000},
187 {1, 0x2160000, 0x2170000, 0x110000},
188 {1, 0x2170000, 0x2172000, 0x12e000},
189 {0, 0x0000000, 0x0000000, 0x000000},
190 {0, 0x0000000, 0x0000000, 0x000000},
191 {0, 0x0000000, 0x0000000, 0x000000},
192 {0, 0x0000000, 0x0000000, 0x000000},
193 {0, 0x0000000, 0x0000000, 0x000000},
194 {0, 0x0000000, 0x0000000, 0x000000},
195 {0, 0x0000000, 0x0000000, 0x000000},
196 {0, 0x0000000, 0x0000000, 0x000000} } },
197 {{{1, 0x2200000, 0x2204000, 0x1b0000} } },/* 34: CAM */
198 {{{0} } }, /* 35: */
199 {{{0} } }, /* 36: */
200 {{{0} } }, /* 37: */
201 {{{0} } }, /* 38: */
202 {{{0} } }, /* 39: */
203 {{{1, 0x2800000, 0x2804000, 0x1a4000} } },/* 40: TMR */
204 {{{1, 0x2900000, 0x2901000, 0x16b000} } },/* 41: P2NR3 */
205 {{{1, 0x2a00000, 0x2a00400, 0x1ac400} } },/* 42: RPMX1 */
206 {{{1, 0x2b00000, 0x2b00400, 0x1ac800} } },/* 43: RPMX2 */
207 {{{1, 0x2c00000, 0x2c00400, 0x1acc00} } },/* 44: RPMX3 */
208 {{{1, 0x2d00000, 0x2d00400, 0x1ad000} } },/* 45: RPMX4 */
209 {{{1, 0x2e00000, 0x2e00400, 0x1ad400} } },/* 46: RPMX5 */
210 {{{1, 0x2f00000, 0x2f00400, 0x1ad800} } },/* 47: RPMX6 */
211 {{{1, 0x3000000, 0x3000400, 0x1adc00} } },/* 48: RPMX7 */
212 {{{0, 0x3100000, 0x3104000, 0x1a8000} } },/* 49: XDMA */
213 {{{1, 0x3200000, 0x3204000, 0x1d4000} } },/* 50: I2Q */
214 {{{1, 0x3300000, 0x3304000, 0x1a0000} } },/* 51: ROMUSB */
215 {{{0} } }, /* 52: */
216 {{{1, 0x3500000, 0x3500400, 0x1ac000} } },/* 53: RPMX0 */
217 {{{1, 0x3600000, 0x3600400, 0x1ae000} } },/* 54: RPMX8 */
218 {{{1, 0x3700000, 0x3700400, 0x1ae400} } },/* 55: RPMX9 */
219 {{{1, 0x3800000, 0x3804000, 0x1d0000} } },/* 56: OCM0 */
220 {{{1, 0x3900000, 0x3904000, 0x1b4000} } },/* 57: CRYPTO */
221 {{{1, 0x3a00000, 0x3a04000, 0x1d8000} } },/* 58: SMB */
222 {{{0} } }, /* 59: I2C0 */
223 {{{0} } }, /* 60: I2C1 */
224 {{{1, 0x3d00000, 0x3d04000, 0x1d8000} } },/* 61: LPC */
225 {{{1, 0x3e00000, 0x3e01000, 0x167000} } },/* 62: P2NC */
226 {{{1, 0x3f00000, 0x3f01000, 0x168000} } } /* 63: P2NR0 */
227};
228
229/*
230 * top 12 bits of crb internal address (hub, agent)
231 */
232static const unsigned crb_hub_agt[64] = {
233 0,
234 QLCNIC_HW_CRB_HUB_AGT_ADR_PS,
235 QLCNIC_HW_CRB_HUB_AGT_ADR_MN,
236 QLCNIC_HW_CRB_HUB_AGT_ADR_MS,
237 0,
238 QLCNIC_HW_CRB_HUB_AGT_ADR_SRE,
239 QLCNIC_HW_CRB_HUB_AGT_ADR_NIU,
240 QLCNIC_HW_CRB_HUB_AGT_ADR_QMN,
241 QLCNIC_HW_CRB_HUB_AGT_ADR_SQN0,
242 QLCNIC_HW_CRB_HUB_AGT_ADR_SQN1,
243 QLCNIC_HW_CRB_HUB_AGT_ADR_SQN2,
244 QLCNIC_HW_CRB_HUB_AGT_ADR_SQN3,
245 QLCNIC_HW_CRB_HUB_AGT_ADR_I2Q,
246 QLCNIC_HW_CRB_HUB_AGT_ADR_TIMR,
247 QLCNIC_HW_CRB_HUB_AGT_ADR_ROMUSB,
248 QLCNIC_HW_CRB_HUB_AGT_ADR_PGN4,
249 QLCNIC_HW_CRB_HUB_AGT_ADR_XDMA,
250 QLCNIC_HW_CRB_HUB_AGT_ADR_PGN0,
251 QLCNIC_HW_CRB_HUB_AGT_ADR_PGN1,
252 QLCNIC_HW_CRB_HUB_AGT_ADR_PGN2,
253 QLCNIC_HW_CRB_HUB_AGT_ADR_PGN3,
254 QLCNIC_HW_CRB_HUB_AGT_ADR_PGND,
255 QLCNIC_HW_CRB_HUB_AGT_ADR_PGNI,
256 QLCNIC_HW_CRB_HUB_AGT_ADR_PGS0,
257 QLCNIC_HW_CRB_HUB_AGT_ADR_PGS1,
258 QLCNIC_HW_CRB_HUB_AGT_ADR_PGS2,
259 QLCNIC_HW_CRB_HUB_AGT_ADR_PGS3,
260 0,
261 QLCNIC_HW_CRB_HUB_AGT_ADR_PGSI,
262 QLCNIC_HW_CRB_HUB_AGT_ADR_SN,
263 0,
264 QLCNIC_HW_CRB_HUB_AGT_ADR_EG,
265 0,
266 QLCNIC_HW_CRB_HUB_AGT_ADR_PS,
267 QLCNIC_HW_CRB_HUB_AGT_ADR_CAM,
268 0,
269 0,
270 0,
271 0,
272 0,
273 QLCNIC_HW_CRB_HUB_AGT_ADR_TIMR,
274 0,
275 QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX1,
276 QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX2,
277 QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX3,
278 QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX4,
279 QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX5,
280 QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX6,
281 QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX7,
282 QLCNIC_HW_CRB_HUB_AGT_ADR_XDMA,
283 QLCNIC_HW_CRB_HUB_AGT_ADR_I2Q,
284 QLCNIC_HW_CRB_HUB_AGT_ADR_ROMUSB,
285 0,
286 QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX0,
287 QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX8,
288 QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX9,
289 QLCNIC_HW_CRB_HUB_AGT_ADR_OCM0,
290 0,
291 QLCNIC_HW_CRB_HUB_AGT_ADR_SMB,
292 QLCNIC_HW_CRB_HUB_AGT_ADR_I2C0,
293 QLCNIC_HW_CRB_HUB_AGT_ADR_I2C1,
294 0,
295 QLCNIC_HW_CRB_HUB_AGT_ADR_PGNC,
296 0,
297};
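/*
 * Example (assuming QLCNIC_PCI_CRBSPACE has already been subtracted from the
 * offset, as the 2M window helpers do): for an NIU register at 0x00600098,
 * CRB_BLK() yields 6, so CRB_HI() places
 * crb_hub_agt[6] == QLCNIC_HW_CRB_HUB_AGT_ADR_NIU in bits [31:20] and keeps
 * bits [19:16] of the offset itself.
 */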
298
299/* PCI Windowing for DDR regions. */
300
301#define QLCNIC_PCIE_SEM_TIMEOUT 10000
302
303int
304qlcnic_pcie_sem_lock(struct qlcnic_adapter *adapter, int sem, u32 id_reg)
305{
306 int done = 0, timeout = 0;
307
308 while (!done) {
309 done = QLCRD32(adapter, QLCNIC_PCIE_REG(PCIE_SEM_LOCK(sem)));
310 if (done == 1)
311 break;
312 if (++timeout >= QLCNIC_PCIE_SEM_TIMEOUT)
313 return -EIO;
314 msleep(1);
315 }
316
317 if (id_reg)
318 QLCWR32(adapter, id_reg, adapter->portnum);
319
320 return 0;
321}
322
323void
324qlcnic_pcie_sem_unlock(struct qlcnic_adapter *adapter, int sem)
325{
326 QLCRD32(adapter, QLCNIC_PCIE_REG(PCIE_SEM_UNLOCK(sem)));
327}
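/*
 * Typical usage sketch (the semaphore number and ID register are examples
 * only):
 *
 *   if (qlcnic_pcie_sem_lock(adapter, 2, QLCNIC_ROM_LOCK_ID))
 *           return -EIO;
 *   ...access the flash ROM...
 *   qlcnic_pcie_sem_unlock(adapter, 2);
 */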
328
329static int
330qlcnic_send_cmd_descs(struct qlcnic_adapter *adapter,
331 struct cmd_desc_type0 *cmd_desc_arr, int nr_desc)
332{
333 u32 i, producer, consumer;
334 struct qlcnic_cmd_buffer *pbuf;
335 struct cmd_desc_type0 *cmd_desc;
336 struct qlcnic_host_tx_ring *tx_ring;
337
338 i = 0;
339
340 if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
341 return -EIO;
342
343 tx_ring = adapter->tx_ring;
344 __netif_tx_lock_bh(tx_ring->txq);
345
346 producer = tx_ring->producer;
347 consumer = tx_ring->sw_consumer;
348
349 if (nr_desc >= qlcnic_tx_avail(tx_ring)) {
350 netif_tx_stop_queue(tx_ring->txq);
351 __netif_tx_unlock_bh(tx_ring->txq);
352 return -EBUSY;
353 }
354
355 do {
356 cmd_desc = &cmd_desc_arr[i];
357
358 pbuf = &tx_ring->cmd_buf_arr[producer];
359 pbuf->skb = NULL;
360 pbuf->frag_count = 0;
361
362 memcpy(&tx_ring->desc_head[producer],
363 &cmd_desc_arr[i], sizeof(struct cmd_desc_type0));
364
365 producer = get_next_index(producer, tx_ring->num_desc);
366 i++;
367
368 } while (i != nr_desc);
369
370 tx_ring->producer = producer;
371
372 qlcnic_update_cmd_producer(adapter, tx_ring);
373
374 __netif_tx_unlock_bh(tx_ring->txq);
375
376 return 0;
377}
378
379static int
380qlcnic_sre_macaddr_change(struct qlcnic_adapter *adapter, u8 *addr,
381 unsigned op)
382{
383 struct qlcnic_nic_req req;
384 struct qlcnic_mac_req *mac_req;
385 u64 word;
386
387 memset(&req, 0, sizeof(struct qlcnic_nic_req));
388 req.qhdr = cpu_to_le64(QLCNIC_REQUEST << 23);
389
390 word = QLCNIC_MAC_EVENT | ((u64)adapter->portnum << 16);
391 req.req_hdr = cpu_to_le64(word);
392
393 mac_req = (struct qlcnic_mac_req *)&req.words[0];
394 mac_req->op = op;
395 memcpy(mac_req->mac_addr, addr, 6);
396
397 return qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
398}
399
400static int qlcnic_nic_add_mac(struct qlcnic_adapter *adapter,
401 u8 *addr, struct list_head *del_list)
402{
403 struct list_head *head;
404 struct qlcnic_mac_list_s *cur;
405
406 /* look up if already exists */
407 list_for_each(head, del_list) {
408 cur = list_entry(head, struct qlcnic_mac_list_s, list);
409
410 if (memcmp(addr, cur->mac_addr, ETH_ALEN) == 0) {
411 list_move_tail(head, &adapter->mac_list);
412 return 0;
413 }
414 }
415
416 cur = kzalloc(sizeof(struct qlcnic_mac_list_s), GFP_ATOMIC);
417 if (cur == NULL) {
418 dev_err(&adapter->netdev->dev,
419 "failed to add mac address filter\n");
420 return -ENOMEM;
421 }
422 memcpy(cur->mac_addr, addr, ETH_ALEN);
423 list_add_tail(&cur->list, &adapter->mac_list);
424
425 return qlcnic_sre_macaddr_change(adapter,
426 cur->mac_addr, QLCNIC_MAC_ADD);
427}
428
429void qlcnic_set_multi(struct net_device *netdev)
430{
431 struct qlcnic_adapter *adapter = netdev_priv(netdev);
432 struct dev_mc_list *mc_ptr;
433 u8 bcast_addr[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
434 u32 mode = VPORT_MISS_MODE_DROP;
435 LIST_HEAD(del_list);
436 struct list_head *head;
437 struct qlcnic_mac_list_s *cur;
438
439 list_splice_tail_init(&adapter->mac_list, &del_list);
440
441 qlcnic_nic_add_mac(adapter, adapter->mac_addr, &del_list);
442 qlcnic_nic_add_mac(adapter, bcast_addr, &del_list);
443
444 if (netdev->flags & IFF_PROMISC) {
445 mode = VPORT_MISS_MODE_ACCEPT_ALL;
446 goto send_fw_cmd;
447 }
448
449 if ((netdev->flags & IFF_ALLMULTI) ||
450 (netdev->mc_count > adapter->max_mc_count)) {
451 mode = VPORT_MISS_MODE_ACCEPT_MULTI;
452 goto send_fw_cmd;
453 }
454
455 if (netdev->mc_count > 0) {
456 for (mc_ptr = netdev->mc_list; mc_ptr;
457 mc_ptr = mc_ptr->next) {
458 qlcnic_nic_add_mac(adapter, mc_ptr->dmi_addr,
459 &del_list);
460 }
461 }
462
463send_fw_cmd:
464 qlcnic_nic_set_promisc(adapter, mode);
465 head = &del_list;
466 while (!list_empty(head)) {
467 cur = list_entry(head->next, struct qlcnic_mac_list_s, list);
468
469 qlcnic_sre_macaddr_change(adapter,
470 cur->mac_addr, QLCNIC_MAC_DEL);
471 list_del(&cur->list);
472 kfree(cur);
473 }
474}
475
476int qlcnic_nic_set_promisc(struct qlcnic_adapter *adapter, u32 mode)
477{
478 struct qlcnic_nic_req req;
479 u64 word;
480
481 memset(&req, 0, sizeof(struct qlcnic_nic_req));
482
483 req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23);
484
485 word = QLCNIC_H2C_OPCODE_PROXY_SET_VPORT_MISS_MODE |
486 ((u64)adapter->portnum << 16);
487 req.req_hdr = cpu_to_le64(word);
488
489 req.words[0] = cpu_to_le64(mode);
490
491 return qlcnic_send_cmd_descs(adapter,
492 (struct cmd_desc_type0 *)&req, 1);
493}
494
495void qlcnic_free_mac_list(struct qlcnic_adapter *adapter)
496{
497 struct qlcnic_mac_list_s *cur;
498 struct list_head *head = &adapter->mac_list;
499
500 while (!list_empty(head)) {
501 cur = list_entry(head->next, struct qlcnic_mac_list_s, list);
502 qlcnic_sre_macaddr_change(adapter,
503 cur->mac_addr, QLCNIC_MAC_DEL);
504 list_del(&cur->list);
505 kfree(cur);
506 }
507}
508
509#define QLCNIC_CONFIG_INTR_COALESCE 3
510
511/*
512 * Send the interrupt coalescing parameter set by ethtool to the card.
513 */
514int qlcnic_config_intr_coalesce(struct qlcnic_adapter *adapter)
515{
516 struct qlcnic_nic_req req;
517 u64 word[6];
518 int rv, i;
519
520 memset(&req, 0, sizeof(struct qlcnic_nic_req));
521
522 req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23);
523
524 word[0] = QLCNIC_CONFIG_INTR_COALESCE | ((u64)adapter->portnum << 16);
525 req.req_hdr = cpu_to_le64(word[0]);
526
527 memcpy(&word[0], &adapter->coal, sizeof(adapter->coal));
528 for (i = 0; i < 6; i++)
529 req.words[i] = cpu_to_le64(word[i]);
530
531 rv = qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
532 if (rv != 0)
533 dev_err(&adapter->netdev->dev,
534 "Could not send interrupt coalescing parameters\n");
535
536 return rv;
537}
538
539int qlcnic_config_hw_lro(struct qlcnic_adapter *adapter, int enable)
540{
541 struct qlcnic_nic_req req;
542 u64 word;
543 int rv;
544
545 if ((adapter->flags & QLCNIC_LRO_ENABLED) == enable)
546 return 0;
547
548 memset(&req, 0, sizeof(struct qlcnic_nic_req));
549
550 req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23);
551
552 word = QLCNIC_H2C_OPCODE_CONFIG_HW_LRO | ((u64)adapter->portnum << 16);
553 req.req_hdr = cpu_to_le64(word);
554
555 req.words[0] = cpu_to_le64(enable);
556
557 rv = qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
558 if (rv != 0)
559 dev_err(&adapter->netdev->dev,
560 "Could not send configure hw lro request\n");
561
562 adapter->flags ^= QLCNIC_LRO_ENABLED;
563
564 return rv;
565}
566
567int qlcnic_config_bridged_mode(struct qlcnic_adapter *adapter, int enable)
568{
569 struct qlcnic_nic_req req;
570 u64 word;
571 int rv;
572
573 if (!!(adapter->flags & QLCNIC_BRIDGE_ENABLED) == enable)
574 return 0;
575
576 memset(&req, 0, sizeof(struct qlcnic_nic_req));
577
578 req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23);
579
580 word = QLCNIC_H2C_OPCODE_CONFIG_BRIDGING |
581 ((u64)adapter->portnum << 16);
582 req.req_hdr = cpu_to_le64(word);
583
584 req.words[0] = cpu_to_le64(enable);
585
586 rv = qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
587 if (rv != 0)
588 dev_err(&adapter->netdev->dev,
589 "Could not send configure bridge mode request\n");
590
591 adapter->flags ^= QLCNIC_BRIDGE_ENABLED;
592
593 return rv;
594}
595
596
597#define RSS_HASHTYPE_IP_TCP 0x3
598
599int qlcnic_config_rss(struct qlcnic_adapter *adapter, int enable)
600{
601 struct qlcnic_nic_req req;
602 u64 word;
603 int i, rv;
604
605 const u64 key[] = { 0xbeac01fa6a42b73bULL, 0x8030f20c77cb2da3ULL,
606 0xae7b30b4d0ca2bcbULL, 0x43a38fb04167253dULL,
607 0x255b0ec26d5a56daULL };
608
609
610 memset(&req, 0, sizeof(struct qlcnic_nic_req));
611 req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23);
612
613 word = QLCNIC_H2C_OPCODE_CONFIG_RSS | ((u64)adapter->portnum << 16);
614 req.req_hdr = cpu_to_le64(word);
615
616 /*
617 * RSS request:
618 * bits 3-0: hash_method
619 * 5-4: hash_type_ipv4
620 * 7-6: hash_type_ipv6
621 * 8: enable
622 * 9: use indirection table
623 * 47-10: reserved
624 * 63-48: indirection table mask
625 */
626 word = ((u64)(RSS_HASHTYPE_IP_TCP & 0x3) << 4) |
627 ((u64)(RSS_HASHTYPE_IP_TCP & 0x3) << 6) |
628 ((u64)(enable & 0x1) << 8) |
629 ((0x7ULL) << 48);
630 req.words[0] = cpu_to_le64(word);
631 for (i = 0; i < 5; i++)
632 req.words[i+1] = cpu_to_le64(key[i]);
633
634 rv = qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
635 if (rv != 0)
636 dev_err(&adapter->netdev->dev, "could not configure RSS\n");
637
638 return rv;
639}
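To make the bit layout above concrete: with RSS_HASHTYPE_IP_TCP (0x3) selected for both IPv4 and IPv6, enable = 1 and the fixed indirection-table mask of 0x7, the control word evaluates to (worked arithmetic, not additional driver code):

	/* (0x3 << 4) | (0x3 << 6) | (1 << 8) | (0x7ULL << 48)
	 *	= 0x30 | 0xc0 | 0x100 | 0x0007000000000000
	 *	= 0x00070000000001f0
	 */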
640
641int qlcnic_config_ipaddr(struct qlcnic_adapter *adapter, u32 ip, int cmd)
642{
643 struct qlcnic_nic_req req;
644 u64 word;
645 int rv;
646
647 memset(&req, 0, sizeof(struct qlcnic_nic_req));
648 req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23);
649
650 word = QLCNIC_H2C_OPCODE_CONFIG_IPADDR | ((u64)adapter->portnum << 16);
651 req.req_hdr = cpu_to_le64(word);
652
653 req.words[0] = cpu_to_le64(cmd);
654 req.words[1] = cpu_to_le64(ip);
655
656 rv = qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
657 if (rv != 0)
658 dev_err(&adapter->netdev->dev,
659			"could not notify %s IP 0x%x request\n",
660 (cmd == QLCNIC_IP_UP) ? "Add" : "Remove", ip);
661
662 return rv;
663}
664
665int qlcnic_linkevent_request(struct qlcnic_adapter *adapter, int enable)
666{
667 struct qlcnic_nic_req req;
668 u64 word;
669 int rv;
670
671 memset(&req, 0, sizeof(struct qlcnic_nic_req));
672 req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23);
673
674 word = QLCNIC_H2C_OPCODE_GET_LINKEVENT | ((u64)adapter->portnum << 16);
675 req.req_hdr = cpu_to_le64(word);
676 req.words[0] = cpu_to_le64(enable | (enable << 8));
677
678 rv = qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
679 if (rv != 0)
680 dev_err(&adapter->netdev->dev,
681 "could not configure link notification\n");
682
683 return rv;
684}
685
686int qlcnic_send_lro_cleanup(struct qlcnic_adapter *adapter)
687{
688 struct qlcnic_nic_req req;
689 u64 word;
690 int rv;
691
692 memset(&req, 0, sizeof(struct qlcnic_nic_req));
693 req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23);
694
695 word = QLCNIC_H2C_OPCODE_LRO_REQUEST |
696 ((u64)adapter->portnum << 16) |
697			((u64)QLCNIC_LRO_REQUEST_CLEANUP << 56);
698
699 req.req_hdr = cpu_to_le64(word);
700
701 rv = qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
702 if (rv != 0)
703 dev_err(&adapter->netdev->dev,
704 "could not cleanup lro flows\n");
705
706 return rv;
707}
708
709/*
710 * qlcnic_change_mtu - Change the Maximum Transfer Unit
711 * @returns 0 on success, negative on failure
712 */
713
714int qlcnic_change_mtu(struct net_device *netdev, int mtu)
715{
716 struct qlcnic_adapter *adapter = netdev_priv(netdev);
717 int rc = 0;
718
719 if (mtu > P3_MAX_MTU) {
720 dev_err(&adapter->netdev->dev, "mtu > %d bytes unsupported\n",
721 P3_MAX_MTU);
722 return -EINVAL;
723 }
724
725 rc = qlcnic_fw_cmd_set_mtu(adapter, mtu);
726
727 if (!rc)
728 netdev->mtu = mtu;
729
730 return rc;
731}
732
733int qlcnic_get_mac_addr(struct qlcnic_adapter *adapter, u64 *mac)
734{
735 u32 crbaddr, mac_hi, mac_lo;
736 int pci_func = adapter->ahw.pci_func;
737
738 crbaddr = CRB_MAC_BLOCK_START +
739 (4 * ((pci_func/2) * 3)) + (4 * (pci_func & 1));
740
741 mac_lo = QLCRD32(adapter, crbaddr);
742 mac_hi = QLCRD32(adapter, crbaddr+4);
743
744 if (pci_func & 1)
745 *mac = le64_to_cpu((mac_lo >> 16) | ((u64)mac_hi << 16));
746 else
747 *mac = le64_to_cpu((u64)mac_lo | ((u64)mac_hi << 32));
748
749 return 0;
750}
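As a concrete check of the CRB addressing above (a sketch of the arithmetic, nothing more): each pair of PCI functions shares a three-word block, so for pci_func = 3 the reads land at

	/* crbaddr = CRB_MAC_BLOCK_START + 4 * ((3 / 2) * 3) + 4 * (3 & 1)
	 *	   = CRB_MAC_BLOCK_START + 12 + 4
	 *	   = CRB_MAC_BLOCK_START + 16
	 * and, pci_func being odd, the MAC is assembled as
	 * (mac_lo >> 16) | ((u64)mac_hi << 16).
	 */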
751
752/*
753 * Changes the CRB window to the specified window.
754 *
755 * Returns < 0 if off is not valid,
756 *	1 if window access is needed. 'off' is set to offset from
757 *	CRB space in 128M pci map
758 *	0 if no window access is needed. 'off' is set to 2M addr
759 * In: 'off' is offset from base in 128M pci map
760 */
761static int
762qlcnic_pci_get_crb_addr_2M(struct qlcnic_adapter *adapter,
763 ulong off, void __iomem **addr)
764{
765 const struct crb_128M_2M_sub_block_map *m;
766
767 if ((off >= QLCNIC_CRB_MAX) || (off < QLCNIC_PCI_CRBSPACE))
768 return -EINVAL;
769
770 off -= QLCNIC_PCI_CRBSPACE;
771
772 /*
773 * Try direct map
774 */
775 m = &crb_128M_2M_map[CRB_BLK(off)].sub_block[CRB_SUBBLK(off)];
776
777 if (m->valid && (m->start_128M <= off) && (m->end_128M > off)) {
778 *addr = adapter->ahw.pci_base0 + m->start_2M +
779 (off - m->start_128M);
780 return 0;
781 }
782
783 /*
784 * Not in direct map, use crb window
785 */
786 *addr = adapter->ahw.pci_base0 + CRB_INDIRECT_2M + (off & MASK(16));
787 return 1;
788}
789
790/*
791 * In: 'off' is offset from CRB space in 128M pci map
792 * Out: 'off' is 2M pci map addr
793 * side effect: lock crb window
794 */
795static void
796qlcnic_pci_set_crbwindow_2M(struct qlcnic_adapter *adapter, ulong off)
797{
798 u32 window;
799 void __iomem *addr = adapter->ahw.pci_base0 + CRB_WINDOW_2M;
800
801 off -= QLCNIC_PCI_CRBSPACE;
802
803 window = CRB_HI(off);
804
805 if (adapter->ahw.crb_win == window)
806 return;
807
808 writel(window, addr);
809 if (readl(addr) != window) {
810 if (printk_ratelimit())
811 dev_warn(&adapter->pdev->dev,
812 "failed to set CRB window to %d off 0x%lx\n",
813 window, off);
814 }
815 adapter->ahw.crb_win = window;
816}
817
818int
819qlcnic_hw_write_wx_2M(struct qlcnic_adapter *adapter, ulong off, u32 data)
820{
821 unsigned long flags;
822 int rv;
823 void __iomem *addr = NULL;
824
825 rv = qlcnic_pci_get_crb_addr_2M(adapter, off, &addr);
826
827 if (rv == 0) {
828 writel(data, addr);
829 return 0;
830 }
831
832 if (rv > 0) {
833 /* indirect access */
834 write_lock_irqsave(&adapter->ahw.crb_lock, flags);
835 crb_win_lock(adapter);
836 qlcnic_pci_set_crbwindow_2M(adapter, off);
837 writel(data, addr);
838 crb_win_unlock(adapter);
839 write_unlock_irqrestore(&adapter->ahw.crb_lock, flags);
840 return 0;
841 }
842
843 dev_err(&adapter->pdev->dev,
844 "%s: invalid offset: 0x%016lx\n", __func__, off);
845 dump_stack();
846 return -EIO;
847}
848
849u32
850qlcnic_hw_read_wx_2M(struct qlcnic_adapter *adapter, ulong off)
851{
852 unsigned long flags;
853 int rv;
854 u32 data;
855 void __iomem *addr = NULL;
856
857 rv = qlcnic_pci_get_crb_addr_2M(adapter, off, &addr);
858
859 if (rv == 0)
860 return readl(addr);
861
862 if (rv > 0) {
863 /* indirect access */
864 write_lock_irqsave(&adapter->ahw.crb_lock, flags);
865 crb_win_lock(adapter);
866 qlcnic_pci_set_crbwindow_2M(adapter, off);
867 data = readl(addr);
868 crb_win_unlock(adapter);
869 write_unlock_irqrestore(&adapter->ahw.crb_lock, flags);
870 return data;
871 }
872
873 dev_err(&adapter->pdev->dev,
874 "%s: invalid offset: 0x%016lx\n", __func__, off);
875 dump_stack();
876 return -1;
877}
878
879
880void __iomem *
881qlcnic_get_ioaddr(struct qlcnic_adapter *adapter, u32 offset)
882{
883 void __iomem *addr = NULL;
884
885 WARN_ON(qlcnic_pci_get_crb_addr_2M(adapter, offset, &addr));
886
887 return addr;
888}
889
890
891static int
892qlcnic_pci_set_window_2M(struct qlcnic_adapter *adapter,
893 u64 addr, u32 *start)
894{
895 u32 window;
896 struct pci_dev *pdev = adapter->pdev;
897
898 if ((addr & 0x00ff800) == 0xff800) {
899 if (printk_ratelimit())
900 dev_warn(&pdev->dev, "QM access not handled\n");
901 return -EIO;
902 }
903
904 window = OCM_WIN_P3P(addr);
905
906 writel(window, adapter->ahw.ocm_win_crb);
907 /* read back to flush */
908 readl(adapter->ahw.ocm_win_crb);
909
910 adapter->ahw.ocm_win = window;
911 *start = QLCNIC_PCI_OCM0_2M + GET_MEM_OFFS_2M(addr);
912 return 0;
913}
914
915static int
916qlcnic_pci_mem_access_direct(struct qlcnic_adapter *adapter, u64 off,
917 u64 *data, int op)
918{
919 void __iomem *addr, *mem_ptr = NULL;
920 resource_size_t mem_base;
921 int ret;
922 u32 start;
923
924 mutex_lock(&adapter->ahw.mem_lock);
925
926 ret = qlcnic_pci_set_window_2M(adapter, off, &start);
927 if (ret != 0)
928 goto unlock;
929
930 addr = pci_base_offset(adapter, start);
931 if (addr)
932 goto noremap;
933
934 mem_base = pci_resource_start(adapter->pdev, 0) + (start & PAGE_MASK);
935
936 mem_ptr = ioremap(mem_base, PAGE_SIZE);
937 if (mem_ptr == NULL) {
938 ret = -EIO;
939 goto unlock;
940 }
941
942 addr = mem_ptr + (start & (PAGE_SIZE - 1));
943
944noremap:
945 if (op == 0) /* read */
946 *data = readq(addr);
947 else /* write */
948 writeq(*data, addr);
949
950unlock:
951 mutex_unlock(&adapter->ahw.mem_lock);
952
953 if (mem_ptr)
954 iounmap(mem_ptr);
955 return ret;
956}
957
958#define MAX_CTL_CHECK 1000
959
960int
961qlcnic_pci_mem_write_2M(struct qlcnic_adapter *adapter,
962 u64 off, u64 data)
963{
964 int i, j, ret;
965 u32 temp, off8;
966 u64 stride;
967 void __iomem *mem_crb;
968
969 /* Only 64-bit aligned access */
970 if (off & 7)
971 return -EIO;
972
973 /* P3 onward, test agent base for MIU and SIU is same */
974 if (ADDR_IN_RANGE(off, QLCNIC_ADDR_QDR_NET,
975 QLCNIC_ADDR_QDR_NET_MAX_P3)) {
976 mem_crb = qlcnic_get_ioaddr(adapter,
977 QLCNIC_CRB_QDR_NET+MIU_TEST_AGT_BASE);
978 goto correct;
979 }
980
981 if (ADDR_IN_RANGE(off, QLCNIC_ADDR_DDR_NET, QLCNIC_ADDR_DDR_NET_MAX)) {
982 mem_crb = qlcnic_get_ioaddr(adapter,
983 QLCNIC_CRB_DDR_NET+MIU_TEST_AGT_BASE);
984 goto correct;
985 }
986
987 if (ADDR_IN_RANGE(off, QLCNIC_ADDR_OCM0, QLCNIC_ADDR_OCM0_MAX))
988 return qlcnic_pci_mem_access_direct(adapter, off, &data, 1);
989
990 return -EIO;
991
992correct:
993 stride = QLCNIC_IS_REVISION_P3P(adapter->ahw.revision_id) ? 16 : 8;
994
995 off8 = off & ~(stride-1);
996
997 mutex_lock(&adapter->ahw.mem_lock);
998
999 writel(off8, (mem_crb + MIU_TEST_AGT_ADDR_LO));
1000 writel(0, (mem_crb + MIU_TEST_AGT_ADDR_HI));
1001
1002 i = 0;
1003 if (stride == 16) {
1004 writel(TA_CTL_ENABLE, (mem_crb + TEST_AGT_CTRL));
1005 writel((TA_CTL_START | TA_CTL_ENABLE),
1006 (mem_crb + TEST_AGT_CTRL));
1007
1008 for (j = 0; j < MAX_CTL_CHECK; j++) {
1009 temp = readl(mem_crb + TEST_AGT_CTRL);
1010 if ((temp & TA_CTL_BUSY) == 0)
1011 break;
1012 }
1013
1014 if (j >= MAX_CTL_CHECK) {
1015 ret = -EIO;
1016 goto done;
1017 }
1018
1019 i = (off & 0xf) ? 0 : 2;
1020 writel(readl(mem_crb + MIU_TEST_AGT_RDDATA(i)),
1021 mem_crb + MIU_TEST_AGT_WRDATA(i));
1022 writel(readl(mem_crb + MIU_TEST_AGT_RDDATA(i+1)),
1023 mem_crb + MIU_TEST_AGT_WRDATA(i+1));
1024 i = (off & 0xf) ? 2 : 0;
1025 }
1026
1027 writel(data & 0xffffffff,
1028 mem_crb + MIU_TEST_AGT_WRDATA(i));
1029 writel((data >> 32) & 0xffffffff,
1030 mem_crb + MIU_TEST_AGT_WRDATA(i+1));
1031
1032 writel((TA_CTL_ENABLE | TA_CTL_WRITE), (mem_crb + TEST_AGT_CTRL));
1033 writel((TA_CTL_START | TA_CTL_ENABLE | TA_CTL_WRITE),
1034 (mem_crb + TEST_AGT_CTRL));
1035
1036 for (j = 0; j < MAX_CTL_CHECK; j++) {
1037 temp = readl(mem_crb + TEST_AGT_CTRL);
1038 if ((temp & TA_CTL_BUSY) == 0)
1039 break;
1040 }
1041
1042 if (j >= MAX_CTL_CHECK) {
1043 if (printk_ratelimit())
1044 dev_err(&adapter->pdev->dev,
1045 "failed to write through agent\n");
1046 ret = -EIO;
1047 } else
1048 ret = 0;
1049
1050done:
1051 mutex_unlock(&adapter->ahw.mem_lock);
1052
1053 return ret;
1054}
1055
1056int
1057qlcnic_pci_mem_read_2M(struct qlcnic_adapter *adapter,
1058 u64 off, u64 *data)
1059{
1060 int j, ret;
1061 u32 temp, off8;
1062 u64 val, stride;
1063 void __iomem *mem_crb;
1064
1065 /* Only 64-bit aligned access */
1066 if (off & 7)
1067 return -EIO;
1068
1069 /* P3 onward, test agent base for MIU and SIU is same */
1070 if (ADDR_IN_RANGE(off, QLCNIC_ADDR_QDR_NET,
1071 QLCNIC_ADDR_QDR_NET_MAX_P3)) {
1072 mem_crb = qlcnic_get_ioaddr(adapter,
1073 QLCNIC_CRB_QDR_NET+MIU_TEST_AGT_BASE);
1074 goto correct;
1075 }
1076
1077 if (ADDR_IN_RANGE(off, QLCNIC_ADDR_DDR_NET, QLCNIC_ADDR_DDR_NET_MAX)) {
1078 mem_crb = qlcnic_get_ioaddr(adapter,
1079 QLCNIC_CRB_DDR_NET+MIU_TEST_AGT_BASE);
1080 goto correct;
1081 }
1082
1083 if (ADDR_IN_RANGE(off, QLCNIC_ADDR_OCM0, QLCNIC_ADDR_OCM0_MAX)) {
1084 return qlcnic_pci_mem_access_direct(adapter,
1085 off, data, 0);
1086 }
1087
1088 return -EIO;
1089
1090correct:
1091 stride = QLCNIC_IS_REVISION_P3P(adapter->ahw.revision_id) ? 16 : 8;
1092
1093 off8 = off & ~(stride-1);
1094
1095 mutex_lock(&adapter->ahw.mem_lock);
1096
1097 writel(off8, (mem_crb + MIU_TEST_AGT_ADDR_LO));
1098 writel(0, (mem_crb + MIU_TEST_AGT_ADDR_HI));
1099 writel(TA_CTL_ENABLE, (mem_crb + TEST_AGT_CTRL));
1100 writel((TA_CTL_START | TA_CTL_ENABLE), (mem_crb + TEST_AGT_CTRL));
1101
1102 for (j = 0; j < MAX_CTL_CHECK; j++) {
1103 temp = readl(mem_crb + TEST_AGT_CTRL);
1104 if ((temp & TA_CTL_BUSY) == 0)
1105 break;
1106 }
1107
1108 if (j >= MAX_CTL_CHECK) {
1109 if (printk_ratelimit())
1110 dev_err(&adapter->pdev->dev,
1111 "failed to read through agent\n");
1112 ret = -EIO;
1113 } else {
1114 off8 = MIU_TEST_AGT_RDDATA_LO;
1115 if ((stride == 16) && (off & 0xf))
1116 off8 = MIU_TEST_AGT_RDDATA_UPPER_LO;
1117
1118 temp = readl(mem_crb + off8 + 4);
1119 val = (u64)temp << 32;
1120 val |= readl(mem_crb + off8);
1121 *data = val;
1122 ret = 0;
1123 }
1124
1125 mutex_unlock(&adapter->ahw.mem_lock);
1126
1127 return ret;
1128}
1129
1130int qlcnic_get_board_info(struct qlcnic_adapter *adapter)
1131{
1132 int offset, board_type, magic;
1133 struct pci_dev *pdev = adapter->pdev;
1134
1135 offset = QLCNIC_FW_MAGIC_OFFSET;
1136 if (qlcnic_rom_fast_read(adapter, offset, &magic))
1137 return -EIO;
1138
1139 if (magic != QLCNIC_BDINFO_MAGIC) {
1140 dev_err(&pdev->dev, "invalid board config, magic=%08x\n",
1141 magic);
1142 return -EIO;
1143 }
1144
1145 offset = QLCNIC_BRDTYPE_OFFSET;
1146 if (qlcnic_rom_fast_read(adapter, offset, &board_type))
1147 return -EIO;
1148
1149 adapter->ahw.board_type = board_type;
1150
1151 if (board_type == QLCNIC_BRDTYPE_P3_4_GB_MM) {
1152 u32 gpio = QLCRD32(adapter, QLCNIC_ROMUSB_GLB_PAD_GPIO_I);
1153 if ((gpio & 0x8000) == 0)
1154 board_type = QLCNIC_BRDTYPE_P3_10G_TP;
1155 }
1156
1157 switch (board_type) {
1158 case QLCNIC_BRDTYPE_P3_HMEZ:
1159 case QLCNIC_BRDTYPE_P3_XG_LOM:
1160 case QLCNIC_BRDTYPE_P3_10G_CX4:
1161 case QLCNIC_BRDTYPE_P3_10G_CX4_LP:
1162 case QLCNIC_BRDTYPE_P3_IMEZ:
1163 case QLCNIC_BRDTYPE_P3_10G_SFP_PLUS:
1164 case QLCNIC_BRDTYPE_P3_10G_SFP_CT:
1165 case QLCNIC_BRDTYPE_P3_10G_SFP_QT:
1166 case QLCNIC_BRDTYPE_P3_10G_XFP:
1167 case QLCNIC_BRDTYPE_P3_10000_BASE_T:
1168 adapter->ahw.port_type = QLCNIC_XGBE;
1169 break;
1170 case QLCNIC_BRDTYPE_P3_REF_QG:
1171 case QLCNIC_BRDTYPE_P3_4_GB:
1172 case QLCNIC_BRDTYPE_P3_4_GB_MM:
1173 adapter->ahw.port_type = QLCNIC_GBE;
1174 break;
1175 case QLCNIC_BRDTYPE_P3_10G_TP:
1176 adapter->ahw.port_type = (adapter->portnum < 2) ?
1177 QLCNIC_XGBE : QLCNIC_GBE;
1178 break;
1179 default:
1180 dev_err(&pdev->dev, "unknown board type %x\n", board_type);
1181 adapter->ahw.port_type = QLCNIC_XGBE;
1182 break;
1183 }
1184
1185 return 0;
1186}
1187
1188int
1189qlcnic_wol_supported(struct qlcnic_adapter *adapter)
1190{
1191 u32 wol_cfg;
1192
1193 wol_cfg = QLCRD32(adapter, QLCNIC_WOL_CONFIG_NV);
1194 if (wol_cfg & (1UL << adapter->portnum)) {
1195 wol_cfg = QLCRD32(adapter, QLCNIC_WOL_CONFIG);
1196 if (wol_cfg & (1 << adapter->portnum))
1197 return 1;
1198 }
1199
1200 return 0;
1201}
1202
1203int qlcnic_config_led(struct qlcnic_adapter *adapter, u32 state, u32 rate)
1204{
1205 struct qlcnic_nic_req req;
1206 int rv;
1207 u64 word;
1208
1209 memset(&req, 0, sizeof(struct qlcnic_nic_req));
1210 req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23);
1211
1212 word = QLCNIC_H2C_OPCODE_CONFIG_LED | ((u64)adapter->portnum << 16);
1213 req.req_hdr = cpu_to_le64(word);
1214
1215 req.words[0] = cpu_to_le64((u64)rate << 32);
1216 req.words[1] = cpu_to_le64(state);
1217
1218 rv = qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
1219 if (rv)
1220 dev_err(&adapter->pdev->dev, "LED configuration failed.\n");
1221
1222 return rv;
1223}
1224
1225static int qlcnic_set_fw_loopback(struct qlcnic_adapter *adapter, u32 flag)
1226{
1227 struct qlcnic_nic_req req;
1228 int rv;
1229 u64 word;
1230
1231 memset(&req, 0, sizeof(struct qlcnic_nic_req));
1232 req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23);
1233
1234 word = QLCNIC_H2C_OPCODE_CONFIG_LOOPBACK |
1235 ((u64)adapter->portnum << 16);
1236 req.req_hdr = cpu_to_le64(word);
1237 req.words[0] = cpu_to_le64(flag);
1238
1239 rv = qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
1240 if (rv)
1241 dev_err(&adapter->pdev->dev,
1242			"%s loopback mode failed.\n",
1243			flag ? "Setting" : "Resetting");
1244 return rv;
1245}
1246
1247int qlcnic_set_ilb_mode(struct qlcnic_adapter *adapter)
1248{
1249 if (qlcnic_set_fw_loopback(adapter, 1))
1250 return -EIO;
1251
1252 if (qlcnic_nic_set_promisc(adapter,
1253 VPORT_MISS_MODE_ACCEPT_ALL)) {
1254 qlcnic_set_fw_loopback(adapter, 0);
1255 return -EIO;
1256 }
1257
1258 msleep(1000);
1259 return 0;
1260}
1261
1262void qlcnic_clear_ilb_mode(struct qlcnic_adapter *adapter)
1263{
1264 int mode = VPORT_MISS_MODE_DROP;
1265 struct net_device *netdev = adapter->netdev;
1266
1267 qlcnic_set_fw_loopback(adapter, 0);
1268
1269 if (netdev->flags & IFF_PROMISC)
1270 mode = VPORT_MISS_MODE_ACCEPT_ALL;
1271 else if (netdev->flags & IFF_ALLMULTI)
1272 mode = VPORT_MISS_MODE_ACCEPT_MULTI;
1273
1274 qlcnic_nic_set_promisc(adapter, mode);
1275}
diff --git a/drivers/net/qlcnic/qlcnic_init.c b/drivers/net/qlcnic/qlcnic_init.c
new file mode 100644
index 00000000000..ea00ab4d4fe
--- /dev/null
+++ b/drivers/net/qlcnic/qlcnic_init.c
@@ -0,0 +1,1541 @@
1/*
2 * Copyright (C) 2009 - QLogic Corporation.
3 * All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License
7 * as published by the Free Software Foundation; either version 2
8 * of the License, or (at your option) any later version.
9 *
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 59 Temple Place - Suite 330, Boston,
18 * MA 02111-1307, USA.
19 *
20 * The full GNU General Public License is included in this distribution
21 * in the file called "COPYING".
22 *
23 */
24
25#include <linux/netdevice.h>
26#include <linux/delay.h>
27#include "qlcnic.h"
28
29struct crb_addr_pair {
30 u32 addr;
31 u32 data;
32};
33
34#define QLCNIC_MAX_CRB_XFORM 60
35static unsigned int crb_addr_xform[QLCNIC_MAX_CRB_XFORM];
36
37#define crb_addr_transform(name) \
38 (crb_addr_xform[QLCNIC_HW_PX_MAP_CRB_##name] = \
39 QLCNIC_HW_CRB_HUB_AGT_ADR_##name << 20)
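For reference, the macro records each hub agent id, shifted into the top 12 bits, indexed by its PX map slot; e.g. crb_addr_transform(SMB) expands to (expansion shown for illustration):

	/* crb_addr_xform[QLCNIC_HW_PX_MAP_CRB_SMB] =
	 *	QLCNIC_HW_CRB_HUB_AGT_ADR_SMB << 20;
	 */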
40
41#define QLCNIC_ADDR_ERROR (0xffffffff)
42
43static void
44qlcnic_post_rx_buffers_nodb(struct qlcnic_adapter *adapter,
45 struct qlcnic_host_rds_ring *rds_ring);
46
47static void crb_addr_transform_setup(void)
48{
49 crb_addr_transform(XDMA);
50 crb_addr_transform(TIMR);
51 crb_addr_transform(SRE);
52 crb_addr_transform(SQN3);
53 crb_addr_transform(SQN2);
54 crb_addr_transform(SQN1);
55 crb_addr_transform(SQN0);
56 crb_addr_transform(SQS3);
57 crb_addr_transform(SQS2);
58 crb_addr_transform(SQS1);
59 crb_addr_transform(SQS0);
60 crb_addr_transform(RPMX7);
61 crb_addr_transform(RPMX6);
62 crb_addr_transform(RPMX5);
63 crb_addr_transform(RPMX4);
64 crb_addr_transform(RPMX3);
65 crb_addr_transform(RPMX2);
66 crb_addr_transform(RPMX1);
67 crb_addr_transform(RPMX0);
68 crb_addr_transform(ROMUSB);
69 crb_addr_transform(SN);
70 crb_addr_transform(QMN);
71 crb_addr_transform(QMS);
72 crb_addr_transform(PGNI);
73 crb_addr_transform(PGND);
74 crb_addr_transform(PGN3);
75 crb_addr_transform(PGN2);
76 crb_addr_transform(PGN1);
77 crb_addr_transform(PGN0);
78 crb_addr_transform(PGSI);
79 crb_addr_transform(PGSD);
80 crb_addr_transform(PGS3);
81 crb_addr_transform(PGS2);
82 crb_addr_transform(PGS1);
83 crb_addr_transform(PGS0);
84 crb_addr_transform(PS);
85 crb_addr_transform(PH);
86 crb_addr_transform(NIU);
87 crb_addr_transform(I2Q);
88 crb_addr_transform(EG);
89 crb_addr_transform(MN);
90 crb_addr_transform(MS);
91 crb_addr_transform(CAS2);
92 crb_addr_transform(CAS1);
93 crb_addr_transform(CAS0);
94 crb_addr_transform(CAM);
95 crb_addr_transform(C2C1);
96 crb_addr_transform(C2C0);
97 crb_addr_transform(SMB);
98 crb_addr_transform(OCM0);
99 crb_addr_transform(I2C0);
100}
101
102void qlcnic_release_rx_buffers(struct qlcnic_adapter *adapter)
103{
104 struct qlcnic_recv_context *recv_ctx;
105 struct qlcnic_host_rds_ring *rds_ring;
106 struct qlcnic_rx_buffer *rx_buf;
107 int i, ring;
108
109 recv_ctx = &adapter->recv_ctx;
110 for (ring = 0; ring < adapter->max_rds_rings; ring++) {
111 rds_ring = &recv_ctx->rds_rings[ring];
112 for (i = 0; i < rds_ring->num_desc; ++i) {
113 rx_buf = &(rds_ring->rx_buf_arr[i]);
114 if (rx_buf->state == QLCNIC_BUFFER_FREE)
115 continue;
116 pci_unmap_single(adapter->pdev,
117 rx_buf->dma,
118 rds_ring->dma_size,
119 PCI_DMA_FROMDEVICE);
120 if (rx_buf->skb != NULL)
121 dev_kfree_skb_any(rx_buf->skb);
122 }
123 }
124}
125
126void qlcnic_release_tx_buffers(struct qlcnic_adapter *adapter)
127{
128 struct qlcnic_cmd_buffer *cmd_buf;
129 struct qlcnic_skb_frag *buffrag;
130 int i, j;
131 struct qlcnic_host_tx_ring *tx_ring = adapter->tx_ring;
132
133 cmd_buf = tx_ring->cmd_buf_arr;
134 for (i = 0; i < tx_ring->num_desc; i++) {
135 buffrag = cmd_buf->frag_array;
136 if (buffrag->dma) {
137 pci_unmap_single(adapter->pdev, buffrag->dma,
138 buffrag->length, PCI_DMA_TODEVICE);
139 buffrag->dma = 0ULL;
140 }
141 for (j = 0; j < cmd_buf->frag_count; j++) {
142 buffrag++;
143 if (buffrag->dma) {
144 pci_unmap_page(adapter->pdev, buffrag->dma,
145 buffrag->length,
146 PCI_DMA_TODEVICE);
147 buffrag->dma = 0ULL;
148 }
149 }
150 if (cmd_buf->skb) {
151 dev_kfree_skb_any(cmd_buf->skb);
152 cmd_buf->skb = NULL;
153 }
154 cmd_buf++;
155 }
156}
157
158void qlcnic_free_sw_resources(struct qlcnic_adapter *adapter)
159{
160 struct qlcnic_recv_context *recv_ctx;
161 struct qlcnic_host_rds_ring *rds_ring;
162 struct qlcnic_host_tx_ring *tx_ring;
163 int ring;
164
165 recv_ctx = &adapter->recv_ctx;
166
167 if (recv_ctx->rds_rings == NULL)
168 goto skip_rds;
169
170 for (ring = 0; ring < adapter->max_rds_rings; ring++) {
171 rds_ring = &recv_ctx->rds_rings[ring];
172 vfree(rds_ring->rx_buf_arr);
173 rds_ring->rx_buf_arr = NULL;
174 }
175 kfree(recv_ctx->rds_rings);
176
177skip_rds:
178 if (adapter->tx_ring == NULL)
179 return;
180
181 tx_ring = adapter->tx_ring;
182 vfree(tx_ring->cmd_buf_arr);
183 kfree(adapter->tx_ring);
184}
185
186int qlcnic_alloc_sw_resources(struct qlcnic_adapter *adapter)
187{
188 struct qlcnic_recv_context *recv_ctx;
189 struct qlcnic_host_rds_ring *rds_ring;
190 struct qlcnic_host_sds_ring *sds_ring;
191 struct qlcnic_host_tx_ring *tx_ring;
192 struct qlcnic_rx_buffer *rx_buf;
193 int ring, i, size;
194
195 struct qlcnic_cmd_buffer *cmd_buf_arr;
196 struct net_device *netdev = adapter->netdev;
197
198 size = sizeof(struct qlcnic_host_tx_ring);
199 tx_ring = kzalloc(size, GFP_KERNEL);
200 if (tx_ring == NULL) {
201 dev_err(&netdev->dev, "failed to allocate tx ring struct\n");
202 return -ENOMEM;
203 }
204 adapter->tx_ring = tx_ring;
205
206 tx_ring->num_desc = adapter->num_txd;
207 tx_ring->txq = netdev_get_tx_queue(netdev, 0);
208
209 cmd_buf_arr = vmalloc(TX_BUFF_RINGSIZE(tx_ring));
210 if (cmd_buf_arr == NULL) {
211 dev_err(&netdev->dev, "failed to allocate cmd buffer ring\n");
212 return -ENOMEM;
213 }
214 memset(cmd_buf_arr, 0, TX_BUFF_RINGSIZE(tx_ring));
215 tx_ring->cmd_buf_arr = cmd_buf_arr;
216
217 recv_ctx = &adapter->recv_ctx;
218
219 size = adapter->max_rds_rings * sizeof(struct qlcnic_host_rds_ring);
220 rds_ring = kzalloc(size, GFP_KERNEL);
221 if (rds_ring == NULL) {
222 dev_err(&netdev->dev, "failed to allocate rds ring struct\n");
223 return -ENOMEM;
224 }
225 recv_ctx->rds_rings = rds_ring;
226
227 for (ring = 0; ring < adapter->max_rds_rings; ring++) {
228 rds_ring = &recv_ctx->rds_rings[ring];
229 switch (ring) {
230 case RCV_RING_NORMAL:
231 rds_ring->num_desc = adapter->num_rxd;
232 if (adapter->ahw.cut_through) {
233 rds_ring->dma_size =
234 QLCNIC_CT_DEFAULT_RX_BUF_LEN;
235 rds_ring->skb_size =
236 QLCNIC_CT_DEFAULT_RX_BUF_LEN;
237 } else {
238 rds_ring->dma_size =
239 QLCNIC_P3_RX_BUF_MAX_LEN;
240 rds_ring->skb_size =
241 rds_ring->dma_size + NET_IP_ALIGN;
242 }
243 break;
244
245 case RCV_RING_JUMBO:
246 rds_ring->num_desc = adapter->num_jumbo_rxd;
247 rds_ring->dma_size =
248 QLCNIC_P3_RX_JUMBO_BUF_MAX_LEN;
249
250 if (adapter->capabilities & QLCNIC_FW_CAPABILITY_HW_LRO)
251 rds_ring->dma_size += QLCNIC_LRO_BUFFER_EXTRA;
252
253 rds_ring->skb_size =
254 rds_ring->dma_size + NET_IP_ALIGN;
255 break;
256
257 case RCV_RING_LRO:
258 rds_ring->num_desc = adapter->num_lro_rxd;
259 rds_ring->dma_size = QLCNIC_RX_LRO_BUFFER_LENGTH;
260 rds_ring->skb_size = rds_ring->dma_size + NET_IP_ALIGN;
261 break;
262
263 }
264 rds_ring->rx_buf_arr = (struct qlcnic_rx_buffer *)
265 vmalloc(RCV_BUFF_RINGSIZE(rds_ring));
266 if (rds_ring->rx_buf_arr == NULL) {
267 dev_err(&netdev->dev, "Failed to allocate "
268 "rx buffer ring %d\n", ring);
269 goto err_out;
270 }
271 memset(rds_ring->rx_buf_arr, 0, RCV_BUFF_RINGSIZE(rds_ring));
272 INIT_LIST_HEAD(&rds_ring->free_list);
273 /*
274 * Now go through all of them, set reference handles
275 * and put them in the queues.
276 */
277 rx_buf = rds_ring->rx_buf_arr;
278 for (i = 0; i < rds_ring->num_desc; i++) {
279 list_add_tail(&rx_buf->list,
280 &rds_ring->free_list);
281 rx_buf->ref_handle = i;
282 rx_buf->state = QLCNIC_BUFFER_FREE;
283 rx_buf++;
284 }
285 spin_lock_init(&rds_ring->lock);
286 }
287
288 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
289 sds_ring = &recv_ctx->sds_rings[ring];
290 sds_ring->irq = adapter->msix_entries[ring].vector;
291 sds_ring->adapter = adapter;
292 sds_ring->num_desc = adapter->num_rxd;
293
294 for (i = 0; i < NUM_RCV_DESC_RINGS; i++)
295 INIT_LIST_HEAD(&sds_ring->free_list[i]);
296 }
297
298 return 0;
299
300err_out:
301 qlcnic_free_sw_resources(adapter);
302 return -ENOMEM;
303}
304
305/*
306 * Utility to translate from internal Phantom CRB address
307 * to external PCI CRB address.
308 */
309static u32 qlcnic_decode_crb_addr(u32 addr)
310{
311 int i;
312 u32 base_addr, offset, pci_base;
313
314 crb_addr_transform_setup();
315
316 pci_base = QLCNIC_ADDR_ERROR;
317 base_addr = addr & 0xfff00000;
318 offset = addr & 0x000fffff;
319
320 for (i = 0; i < QLCNIC_MAX_CRB_XFORM; i++) {
321 if (crb_addr_xform[i] == base_addr) {
322 pci_base = i << 20;
323 break;
324 }
325 }
326 if (pci_base == QLCNIC_ADDR_ERROR)
327 return pci_base;
328 else
329 return pci_base + offset;
330}
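Put differently (an illustrative walk-through, not extra driver code): the decode splits the address into a 12-bit agent base and a 20-bit offset and searches the table built by crb_addr_transform_setup() for that base:

	/* addr      = (QLCNIC_HW_CRB_HUB_AGT_ADR_SMB << 20) | 0x1234
	 * base_addr = addr & 0xfff00000   -> matches crb_addr_xform[i]
	 *                                    at i = QLCNIC_HW_PX_MAP_CRB_SMB
	 * result    = (i << 20) + 0x1234
	 * An unrecognized base yields QLCNIC_ADDR_ERROR (0xffffffff).
	 */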
331
332#define QLCNIC_MAX_ROM_WAIT_USEC 100
333
334static int qlcnic_wait_rom_done(struct qlcnic_adapter *adapter)
335{
336 long timeout = 0;
337 long done = 0;
338
339 cond_resched();
340
341 while (done == 0) {
342 done = QLCRD32(adapter, QLCNIC_ROMUSB_GLB_STATUS);
343 done &= 2;
344 if (++timeout >= QLCNIC_MAX_ROM_WAIT_USEC) {
345 dev_err(&adapter->pdev->dev,
346				"Timeout reached waiting for rom done\n");
347 return -EIO;
348 }
349 udelay(1);
350 }
351 return 0;
352}
353
354static int do_rom_fast_read(struct qlcnic_adapter *adapter,
355 int addr, int *valp)
356{
357 QLCWR32(adapter, QLCNIC_ROMUSB_ROM_ADDRESS, addr);
358 QLCWR32(adapter, QLCNIC_ROMUSB_ROM_DUMMY_BYTE_CNT, 0);
359 QLCWR32(adapter, QLCNIC_ROMUSB_ROM_ABYTE_CNT, 3);
360 QLCWR32(adapter, QLCNIC_ROMUSB_ROM_INSTR_OPCODE, 0xb);
361 if (qlcnic_wait_rom_done(adapter)) {
362 dev_err(&adapter->pdev->dev, "Error waiting for rom done\n");
363 return -EIO;
364 }
365 /* reset abyte_cnt and dummy_byte_cnt */
366 QLCWR32(adapter, QLCNIC_ROMUSB_ROM_ABYTE_CNT, 0);
367 udelay(10);
368 QLCWR32(adapter, QLCNIC_ROMUSB_ROM_DUMMY_BYTE_CNT, 0);
369
370 *valp = QLCRD32(adapter, QLCNIC_ROMUSB_ROM_RDATA);
371 return 0;
372}
373
374static int do_rom_fast_read_words(struct qlcnic_adapter *adapter, int addr,
375 u8 *bytes, size_t size)
376{
377 int addridx;
378 int ret = 0;
379
380 for (addridx = addr; addridx < (addr + size); addridx += 4) {
381 int v;
382 ret = do_rom_fast_read(adapter, addridx, &v);
383 if (ret != 0)
384 break;
385 *(__le32 *)bytes = cpu_to_le32(v);
386 bytes += 4;
387 }
388
389 return ret;
390}
391
392int
393qlcnic_rom_fast_read_words(struct qlcnic_adapter *adapter, int addr,
394 u8 *bytes, size_t size)
395{
396 int ret;
397
398 ret = qlcnic_rom_lock(adapter);
399 if (ret < 0)
400 return ret;
401
402 ret = do_rom_fast_read_words(adapter, addr, bytes, size);
403
404 qlcnic_rom_unlock(adapter);
405 return ret;
406}
407
408int qlcnic_rom_fast_read(struct qlcnic_adapter *adapter, int addr, int *valp)
409{
410 int ret;
411
412 if (qlcnic_rom_lock(adapter) != 0)
413 return -EIO;
414
415 ret = do_rom_fast_read(adapter, addr, valp);
416 qlcnic_rom_unlock(adapter);
417 return ret;
418}
419
420int qlcnic_pinit_from_rom(struct qlcnic_adapter *adapter)
421{
422 int addr, val;
423 int i, n, init_delay;
424 struct crb_addr_pair *buf;
425 unsigned offset;
426 u32 off;
427 struct pci_dev *pdev = adapter->pdev;
428
429 /* resetall */
430 qlcnic_rom_lock(adapter);
431 QLCWR32(adapter, QLCNIC_ROMUSB_GLB_SW_RESET, 0xffffffff);
432 qlcnic_rom_unlock(adapter);
433
434 if (qlcnic_rom_fast_read(adapter, 0, &n) != 0 || (n != 0xcafecafe) ||
435 qlcnic_rom_fast_read(adapter, 4, &n) != 0) {
436 dev_err(&pdev->dev, "ERROR Reading crb_init area: val:%x\n", n);
437 return -EIO;
438 }
439 offset = n & 0xffffU;
440 n = (n >> 16) & 0xffffU;
441
442 if (n >= 1024) {
443 dev_err(&pdev->dev, "QLOGIC card flash not initialized.\n");
444 return -EIO;
445 }
446
447 buf = kcalloc(n, sizeof(struct crb_addr_pair), GFP_KERNEL);
448 if (buf == NULL) {
449		dev_err(&pdev->dev, "Unable to allocate memory for ROM read.\n");
450 return -ENOMEM;
451 }
452
453 for (i = 0; i < n; i++) {
454 if (qlcnic_rom_fast_read(adapter, 8*i + 4*offset, &val) != 0 ||
455 qlcnic_rom_fast_read(adapter, 8*i + 4*offset + 4, &addr) != 0) {
456 kfree(buf);
457 return -EIO;
458 }
459
460 buf[i].addr = addr;
461 buf[i].data = val;
462 }
463
464 for (i = 0; i < n; i++) {
465
466 off = qlcnic_decode_crb_addr(buf[i].addr);
467 if (off == QLCNIC_ADDR_ERROR) {
468 dev_err(&pdev->dev, "CRB init value out of range %x\n",
469 buf[i].addr);
470 continue;
471 }
472 off += QLCNIC_PCI_CRBSPACE;
473
474 if (off & 1)
475 continue;
476
477 /* skipping cold reboot MAGIC */
478 if (off == QLCNIC_CAM_RAM(0x1fc))
479 continue;
480 if (off == (QLCNIC_CRB_I2C0 + 0x1c))
481 continue;
482 if (off == (ROMUSB_GLB + 0xbc)) /* do not reset PCI */
483 continue;
484 if (off == (ROMUSB_GLB + 0xa8))
485 continue;
486 if (off == (ROMUSB_GLB + 0xc8)) /* core clock */
487 continue;
488 if (off == (ROMUSB_GLB + 0x24)) /* MN clock */
489 continue;
490 if (off == (ROMUSB_GLB + 0x1c)) /* MS clock */
491 continue;
492 if ((off & 0x0ff00000) == QLCNIC_CRB_DDR_NET)
493 continue;
494 /* skip the function enable register */
495 if (off == QLCNIC_PCIE_REG(PCIE_SETUP_FUNCTION))
496 continue;
497 if (off == QLCNIC_PCIE_REG(PCIE_SETUP_FUNCTION2))
498 continue;
499 if ((off & 0x0ff00000) == QLCNIC_CRB_SMB)
500 continue;
501
502 init_delay = 1;
503 /* After writing this register, HW needs time for CRB */
504 /* to quiet down (else crb_window returns 0xffffffff) */
505 if (off == QLCNIC_ROMUSB_GLB_SW_RESET)
506 init_delay = 1000;
507
508 QLCWR32(adapter, off, buf[i].data);
509
510 msleep(init_delay);
511 }
512 kfree(buf);
513
514 /* p2dn replyCount */
515 QLCWR32(adapter, QLCNIC_CRB_PEG_NET_D + 0xec, 0x1e);
516 /* disable_peg_cache 0 & 1*/
517 QLCWR32(adapter, QLCNIC_CRB_PEG_NET_D + 0x4c, 8);
518 QLCWR32(adapter, QLCNIC_CRB_PEG_NET_I + 0x4c, 8);
519
520 /* peg_clr_all */
521 QLCWR32(adapter, QLCNIC_CRB_PEG_NET_0 + 0x8, 0);
522 QLCWR32(adapter, QLCNIC_CRB_PEG_NET_0 + 0xc, 0);
523 QLCWR32(adapter, QLCNIC_CRB_PEG_NET_1 + 0x8, 0);
524 QLCWR32(adapter, QLCNIC_CRB_PEG_NET_1 + 0xc, 0);
525 QLCWR32(adapter, QLCNIC_CRB_PEG_NET_2 + 0x8, 0);
526 QLCWR32(adapter, QLCNIC_CRB_PEG_NET_2 + 0xc, 0);
527 QLCWR32(adapter, QLCNIC_CRB_PEG_NET_3 + 0x8, 0);
528 QLCWR32(adapter, QLCNIC_CRB_PEG_NET_3 + 0xc, 0);
529 return 0;
530}
531
532static int
533qlcnic_has_mn(struct qlcnic_adapter *adapter)
534{
535 u32 capability, flashed_ver;
536 capability = 0;
537
538 qlcnic_rom_fast_read(adapter,
539 QLCNIC_FW_VERSION_OFFSET, (int *)&flashed_ver);
540 flashed_ver = QLCNIC_DECODE_VERSION(flashed_ver);
541
542 if (flashed_ver >= QLCNIC_VERSION_CODE(4, 0, 220)) {
543
544 capability = QLCRD32(adapter, QLCNIC_PEG_TUNE_CAPABILITY);
545 if (capability & QLCNIC_PEG_TUNE_MN_PRESENT)
546 return 1;
547 }
548 return 0;
549}
550
551static
552struct uni_table_desc *qlcnic_get_table_desc(const u8 *unirom, int section)
553{
554 u32 i;
555 struct uni_table_desc *directory = (struct uni_table_desc *) &unirom[0];
556 __le32 entries = cpu_to_le32(directory->num_entries);
557
558 for (i = 0; i < entries; i++) {
559
560 __le32 offs = cpu_to_le32(directory->findex) +
561 (i * cpu_to_le32(directory->entry_size));
562 __le32 tab_type = cpu_to_le32(*((u32 *)&unirom[offs] + 8));
563
564 if (tab_type == section)
565 return (struct uni_table_desc *) &unirom[offs];
566 }
567
568 return NULL;
569}
570
571static int
572qlcnic_set_product_offs(struct qlcnic_adapter *adapter)
573{
574 struct uni_table_desc *ptab_descr;
575 const u8 *unirom = adapter->fw->data;
576 u32 i;
577 __le32 entries;
578 int mn_present = qlcnic_has_mn(adapter);
579
580 ptab_descr = qlcnic_get_table_desc(unirom,
581 QLCNIC_UNI_DIR_SECT_PRODUCT_TBL);
582 if (ptab_descr == NULL)
583 return -1;
584
585 entries = cpu_to_le32(ptab_descr->num_entries);
586nomn:
587 for (i = 0; i < entries; i++) {
588
589 __le32 flags, file_chiprev, offs;
590 u8 chiprev = adapter->ahw.revision_id;
591 u32 flagbit;
592
593 offs = cpu_to_le32(ptab_descr->findex) +
594 (i * cpu_to_le32(ptab_descr->entry_size));
595 flags = cpu_to_le32(*((int *)&unirom[offs] +
596 QLCNIC_UNI_FLAGS_OFF));
597 file_chiprev = cpu_to_le32(*((int *)&unirom[offs] +
598 QLCNIC_UNI_CHIP_REV_OFF));
599
600 flagbit = mn_present ? 1 : 2;
601
602 if ((chiprev == file_chiprev) &&
603 ((1ULL << flagbit) & flags)) {
604 adapter->file_prd_off = offs;
605 return 0;
606 }
607 }
608 if (mn_present) {
609 mn_present = 0;
610 goto nomn;
611 }
612 return -1;
613}
614
615static
616struct uni_data_desc *qlcnic_get_data_desc(struct qlcnic_adapter *adapter,
617 u32 section, u32 idx_offset)
618{
619 const u8 *unirom = adapter->fw->data;
620 int idx = cpu_to_le32(*((int *)&unirom[adapter->file_prd_off] +
621 idx_offset));
622 struct uni_table_desc *tab_desc;
623 __le32 offs;
624
625 tab_desc = qlcnic_get_table_desc(unirom, section);
626
627 if (tab_desc == NULL)
628 return NULL;
629
630 offs = cpu_to_le32(tab_desc->findex) +
631 (cpu_to_le32(tab_desc->entry_size) * idx);
632
633 return (struct uni_data_desc *)&unirom[offs];
634}
635
636static u8 *
637qlcnic_get_bootld_offs(struct qlcnic_adapter *adapter)
638{
639 u32 offs = QLCNIC_BOOTLD_START;
640
641 if (adapter->fw_type == QLCNIC_UNIFIED_ROMIMAGE)
642 offs = cpu_to_le32((qlcnic_get_data_desc(adapter,
643 QLCNIC_UNI_DIR_SECT_BOOTLD,
644 QLCNIC_UNI_BOOTLD_IDX_OFF))->findex);
645
646 return (u8 *)&adapter->fw->data[offs];
647}
648
649static u8 *
650qlcnic_get_fw_offs(struct qlcnic_adapter *adapter)
651{
652 u32 offs = QLCNIC_IMAGE_START;
653
654 if (adapter->fw_type == QLCNIC_UNIFIED_ROMIMAGE)
655 offs = cpu_to_le32((qlcnic_get_data_desc(adapter,
656 QLCNIC_UNI_DIR_SECT_FW,
657 QLCNIC_UNI_FIRMWARE_IDX_OFF))->findex);
658
659 return (u8 *)&adapter->fw->data[offs];
660}
661
662static __le32
663qlcnic_get_fw_size(struct qlcnic_adapter *adapter)
664{
665 if (adapter->fw_type == QLCNIC_UNIFIED_ROMIMAGE)
666 return cpu_to_le32((qlcnic_get_data_desc(adapter,
667 QLCNIC_UNI_DIR_SECT_FW,
668 QLCNIC_UNI_FIRMWARE_IDX_OFF))->size);
669 else
670 return cpu_to_le32(
671 *(u32 *)&adapter->fw->data[QLCNIC_FW_SIZE_OFFSET]);
672}
673
674static __le32
675qlcnic_get_fw_version(struct qlcnic_adapter *adapter)
676{
677 struct uni_data_desc *fw_data_desc;
678 const struct firmware *fw = adapter->fw;
679 __le32 major, minor, sub;
680 const u8 *ver_str;
681 int i, ret;
682
683 if (adapter->fw_type != QLCNIC_UNIFIED_ROMIMAGE)
684 return cpu_to_le32(*(u32 *)&fw->data[QLCNIC_FW_VERSION_OFFSET]);
685
686 fw_data_desc = qlcnic_get_data_desc(adapter, QLCNIC_UNI_DIR_SECT_FW,
687 QLCNIC_UNI_FIRMWARE_IDX_OFF);
688 ver_str = fw->data + cpu_to_le32(fw_data_desc->findex) +
689 cpu_to_le32(fw_data_desc->size) - 17;
690
691 for (i = 0; i < 12; i++) {
692 if (!strncmp(&ver_str[i], "REV=", 4)) {
693 ret = sscanf(&ver_str[i+4], "%u.%u.%u ",
694 &major, &minor, &sub);
695 if (ret != 3)
696 return 0;
697 else
698 return major + (minor << 8) + (sub << 16);
699 }
700 }
701
702 return 0;
703}
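A worked example of the packing above (the version string is hypothetical): a unified-image trailer containing "REV=4.0.216" is returned as

	/* 4 + (0 << 8) + (216 << 16) = 0x00d80004
	 * which QLCNIC_DECODE_VERSION() later unpacks for comparison against
	 * QLCNIC_VERSION_CODE(major, minor, build) in qlcnic_need_fw_reset().
	 */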
704
705static __le32
706qlcnic_get_bios_version(struct qlcnic_adapter *adapter)
707{
708 const struct firmware *fw = adapter->fw;
709 __le32 bios_ver, prd_off = adapter->file_prd_off;
710
711 if (adapter->fw_type != QLCNIC_UNIFIED_ROMIMAGE)
712 return cpu_to_le32(
713 *(u32 *)&fw->data[QLCNIC_BIOS_VERSION_OFFSET]);
714
715 bios_ver = cpu_to_le32(*((u32 *) (&fw->data[prd_off])
716 + QLCNIC_UNI_BIOS_VERSION_OFF));
717
718 return (bios_ver << 24) + ((bios_ver >> 8) & 0xff00) + (bios_ver >> 24);
719}
720
721int
722qlcnic_need_fw_reset(struct qlcnic_adapter *adapter)
723{
724 u32 count, old_count;
725 u32 val, version, major, minor, build;
726 int i, timeout;
727
728 if (adapter->need_fw_reset)
729 return 1;
730
731 /* last attempt had failed */
732 if (QLCRD32(adapter, CRB_CMDPEG_STATE) == PHAN_INITIALIZE_FAILED)
733 return 1;
734
735 old_count = QLCRD32(adapter, QLCNIC_PEG_ALIVE_COUNTER);
736
737 for (i = 0; i < 10; i++) {
738
739 timeout = msleep_interruptible(200);
740 if (timeout) {
741 QLCWR32(adapter, CRB_CMDPEG_STATE,
742 PHAN_INITIALIZE_FAILED);
743 return -EINTR;
744 }
745
746 count = QLCRD32(adapter, QLCNIC_PEG_ALIVE_COUNTER);
747 if (count != old_count)
748 break;
749 }
750
751 /* firmware is dead */
752 if (count == old_count)
753 return 1;
754
755 /* check if we have got newer or different file firmware */
756 if (adapter->fw) {
757
758 val = qlcnic_get_fw_version(adapter);
759
760 version = QLCNIC_DECODE_VERSION(val);
761
762 major = QLCRD32(adapter, QLCNIC_FW_VERSION_MAJOR);
763 minor = QLCRD32(adapter, QLCNIC_FW_VERSION_MINOR);
764 build = QLCRD32(adapter, QLCNIC_FW_VERSION_SUB);
765
766 if (version > QLCNIC_VERSION_CODE(major, minor, build))
767 return 1;
768 }
769
770 return 0;
771}
772
773static const char *fw_name[] = {
774 QLCNIC_UNIFIED_ROMIMAGE_NAME,
775 QLCNIC_FLASH_ROMIMAGE_NAME,
776};
777
778int
779qlcnic_load_firmware(struct qlcnic_adapter *adapter)
780{
781 u64 *ptr64;
782 u32 i, flashaddr, size;
783 const struct firmware *fw = adapter->fw;
784 struct pci_dev *pdev = adapter->pdev;
785
786 dev_info(&pdev->dev, "loading firmware from %s\n",
787 fw_name[adapter->fw_type]);
788
789 if (fw) {
790 __le64 data;
791
792 size = (QLCNIC_IMAGE_START - QLCNIC_BOOTLD_START) / 8;
793
794 ptr64 = (u64 *)qlcnic_get_bootld_offs(adapter);
795 flashaddr = QLCNIC_BOOTLD_START;
796
797 for (i = 0; i < size; i++) {
798 data = cpu_to_le64(ptr64[i]);
799
800 if (qlcnic_pci_mem_write_2M(adapter, flashaddr, data))
801 return -EIO;
802
803 flashaddr += 8;
804 }
805
806 size = (__force u32)qlcnic_get_fw_size(adapter) / 8;
807
808 ptr64 = (u64 *)qlcnic_get_fw_offs(adapter);
809 flashaddr = QLCNIC_IMAGE_START;
810
811 for (i = 0; i < size; i++) {
812 data = cpu_to_le64(ptr64[i]);
813
814 if (qlcnic_pci_mem_write_2M(adapter,
815 flashaddr, data))
816 return -EIO;
817
818 flashaddr += 8;
819 }
820 } else {
821 u64 data;
822 u32 hi, lo;
823
824 size = (QLCNIC_IMAGE_START - QLCNIC_BOOTLD_START) / 8;
825 flashaddr = QLCNIC_BOOTLD_START;
826
827 for (i = 0; i < size; i++) {
828 if (qlcnic_rom_fast_read(adapter,
829 flashaddr, (int *)&lo) != 0)
830 return -EIO;
831 if (qlcnic_rom_fast_read(adapter,
832 flashaddr + 4, (int *)&hi) != 0)
833 return -EIO;
834
835 data = (((u64)hi << 32) | lo);
836
837 if (qlcnic_pci_mem_write_2M(adapter,
838 flashaddr, data))
839 return -EIO;
840
841 flashaddr += 8;
842 }
843 }
844 msleep(1);
845
846 QLCWR32(adapter, QLCNIC_CRB_PEG_NET_0 + 0x18, 0x1020);
847 QLCWR32(adapter, QLCNIC_ROMUSB_GLB_SW_RESET, 0x80001e);
848 return 0;
849}
850
851static int
852qlcnic_validate_firmware(struct qlcnic_adapter *adapter)
853{
854 __le32 val;
855 u32 ver, min_ver, bios, min_size;
856 struct pci_dev *pdev = adapter->pdev;
857 const struct firmware *fw = adapter->fw;
858 u8 fw_type = adapter->fw_type;
859
860 if (fw_type == QLCNIC_UNIFIED_ROMIMAGE) {
861 if (qlcnic_set_product_offs(adapter))
862 return -EINVAL;
863
864 min_size = QLCNIC_UNI_FW_MIN_SIZE;
865 } else {
866 val = cpu_to_le32(*(u32 *)&fw->data[QLCNIC_FW_MAGIC_OFFSET]);
867 if ((__force u32)val != QLCNIC_BDINFO_MAGIC)
868 return -EINVAL;
869
870 min_size = QLCNIC_FW_MIN_SIZE;
871 }
872
873 if (fw->size < min_size)
874 return -EINVAL;
875
876 val = qlcnic_get_fw_version(adapter);
877
878 min_ver = QLCNIC_VERSION_CODE(4, 0, 216);
879
880 ver = QLCNIC_DECODE_VERSION(val);
881
882 if ((_major(ver) > _QLCNIC_LINUX_MAJOR) || (ver < min_ver)) {
883 dev_err(&pdev->dev,
884 "%s: firmware version %d.%d.%d unsupported\n",
885 fw_name[fw_type], _major(ver), _minor(ver), _build(ver));
886 return -EINVAL;
887 }
888
889 val = qlcnic_get_bios_version(adapter);
890 qlcnic_rom_fast_read(adapter, QLCNIC_BIOS_VERSION_OFFSET, (int *)&bios);
891 if ((__force u32)val != bios) {
892 dev_err(&pdev->dev, "%s: firmware bios is incompatible\n",
893 fw_name[fw_type]);
894 return -EINVAL;
895 }
896
897 /* check if flashed firmware is newer */
898 if (qlcnic_rom_fast_read(adapter,
899 QLCNIC_FW_VERSION_OFFSET, (int *)&val))
900 return -EIO;
901
902 val = QLCNIC_DECODE_VERSION(val);
903 if (val > ver) {
904 dev_info(&pdev->dev, "%s: firmware is older than flash\n",
905 fw_name[fw_type]);
906 return -EINVAL;
907 }
908
909 QLCWR32(adapter, QLCNIC_CAM_RAM(0x1fc), QLCNIC_BDINFO_MAGIC);
910 return 0;
911}
912
913static void
914qlcnic_get_next_fwtype(struct qlcnic_adapter *adapter)
915{
916 u8 fw_type;
917
918 switch (adapter->fw_type) {
919 case QLCNIC_UNKNOWN_ROMIMAGE:
920 fw_type = QLCNIC_UNIFIED_ROMIMAGE;
921 break;
922
923 case QLCNIC_UNIFIED_ROMIMAGE:
924 default:
925 fw_type = QLCNIC_FLASH_ROMIMAGE;
926 break;
927 }
928
929 adapter->fw_type = fw_type;
930}
931
932
933
934void qlcnic_request_firmware(struct qlcnic_adapter *adapter)
935{
936 struct pci_dev *pdev = adapter->pdev;
937 int rc;
938
939 adapter->fw_type = QLCNIC_UNKNOWN_ROMIMAGE;
940
941next:
942 qlcnic_get_next_fwtype(adapter);
943
944 if (adapter->fw_type == QLCNIC_FLASH_ROMIMAGE) {
945 adapter->fw = NULL;
946 } else {
947 rc = request_firmware(&adapter->fw,
948 fw_name[adapter->fw_type], &pdev->dev);
949 if (rc != 0)
950 goto next;
951
952 rc = qlcnic_validate_firmware(adapter);
953 if (rc != 0) {
954 release_firmware(adapter->fw);
955 msleep(1);
956 goto next;
957 }
958 }
959}
960
961
962void
963qlcnic_release_firmware(struct qlcnic_adapter *adapter)
964{
965 if (adapter->fw)
966 release_firmware(adapter->fw);
967 adapter->fw = NULL;
968}
969
970int qlcnic_phantom_init(struct qlcnic_adapter *adapter)
971{
972 u32 val;
973 int retries = 60;
974
975 do {
976 val = QLCRD32(adapter, CRB_CMDPEG_STATE);
977
978 switch (val) {
979 case PHAN_INITIALIZE_COMPLETE:
980 case PHAN_INITIALIZE_ACK:
981 return 0;
982 case PHAN_INITIALIZE_FAILED:
983 goto out_err;
984 default:
985 break;
986 }
987
988 msleep(500);
989
990 } while (--retries);
991
992 QLCWR32(adapter, CRB_CMDPEG_STATE, PHAN_INITIALIZE_FAILED);
993
994out_err:
995 dev_err(&adapter->pdev->dev, "firmware init failed\n");
996 return -EIO;
997}
998
999static int
1000qlcnic_receive_peg_ready(struct qlcnic_adapter *adapter)
1001{
1002 u32 val;
1003 int retries = 2000;
1004
1005 do {
1006 val = QLCRD32(adapter, CRB_RCVPEG_STATE);
1007
1008 if (val == PHAN_PEG_RCV_INITIALIZED)
1009 return 0;
1010
1011 msleep(10);
1012
1013 } while (--retries);
1014
1015 if (!retries) {
1016 dev_err(&adapter->pdev->dev, "Receive Peg initialization not "
1017 "complete, state: 0x%x.\n", val);
1018 return -EIO;
1019 }
1020
1021 return 0;
1022}
1023
1024int qlcnic_init_firmware(struct qlcnic_adapter *adapter)
1025{
1026 int err;
1027
1028 err = qlcnic_receive_peg_ready(adapter);
1029 if (err)
1030 return err;
1031
1032 QLCWR32(adapter, CRB_NIC_CAPABILITIES_HOST, INTR_SCHEME_PERPORT);
1033 QLCWR32(adapter, CRB_NIC_MSI_MODE_HOST, MSI_MODE_MULTIFUNC);
1034 QLCWR32(adapter, CRB_MPORT_MODE, MPORT_MULTI_FUNCTION_MODE);
1035 QLCWR32(adapter, CRB_CMDPEG_STATE, PHAN_INITIALIZE_ACK);
1036
1037 return err;
1038}
1039
1040static void
1041qlcnic_handle_linkevent(struct qlcnic_adapter *adapter,
1042 struct qlcnic_fw_msg *msg)
1043{
1044 u32 cable_OUI;
1045 u16 cable_len;
1046 u16 link_speed;
1047 u8 link_status, module, duplex, autoneg;
1048 struct net_device *netdev = adapter->netdev;
1049
1050 adapter->has_link_events = 1;
1051
1052 cable_OUI = msg->body[1] & 0xffffffff;
1053 cable_len = (msg->body[1] >> 32) & 0xffff;
1054 link_speed = (msg->body[1] >> 48) & 0xffff;
1055
1056 link_status = msg->body[2] & 0xff;
1057 duplex = (msg->body[2] >> 16) & 0xff;
1058 autoneg = (msg->body[2] >> 24) & 0xff;
1059
1060 module = (msg->body[2] >> 8) & 0xff;
1061 if (module == LINKEVENT_MODULE_TWINAX_UNSUPPORTED_CABLE)
1062 dev_info(&netdev->dev, "unsupported cable: OUI 0x%x, "
1063 "length %d\n", cable_OUI, cable_len);
1064 else if (module == LINKEVENT_MODULE_TWINAX_UNSUPPORTED_CABLELEN)
1065 dev_info(&netdev->dev, "unsupported cable length %d\n",
1066 cable_len);
1067
1068 qlcnic_advert_link_change(adapter, link_status);
1069
1070 if (duplex == LINKEVENT_FULL_DUPLEX)
1071 adapter->link_duplex = DUPLEX_FULL;
1072 else
1073 adapter->link_duplex = DUPLEX_HALF;
1074
1075 adapter->module_type = module;
1076 adapter->link_autoneg = autoneg;
1077 adapter->link_speed = link_speed;
1078}
1079
1080static void
1081qlcnic_handle_fw_message(int desc_cnt, int index,
1082 struct qlcnic_host_sds_ring *sds_ring)
1083{
1084 struct qlcnic_fw_msg msg;
1085 struct status_desc *desc;
1086 int i = 0, opcode;
1087
1088 while (desc_cnt > 0 && i < 8) {
1089 desc = &sds_ring->desc_head[index];
1090 msg.words[i++] = le64_to_cpu(desc->status_desc_data[0]);
1091 msg.words[i++] = le64_to_cpu(desc->status_desc_data[1]);
1092
1093 index = get_next_index(index, sds_ring->num_desc);
1094 desc_cnt--;
1095 }
1096
1097 opcode = qlcnic_get_nic_msg_opcode(msg.body[0]);
1098 switch (opcode) {
1099 case QLCNIC_C2H_OPCODE_GET_LINKEVENT_RESPONSE:
1100 qlcnic_handle_linkevent(sds_ring->adapter, &msg);
1101 break;
1102 default:
1103 break;
1104 }
1105}
1106
1107static int
1108qlcnic_alloc_rx_skb(struct qlcnic_adapter *adapter,
1109 struct qlcnic_host_rds_ring *rds_ring,
1110 struct qlcnic_rx_buffer *buffer)
1111{
1112 struct sk_buff *skb;
1113 dma_addr_t dma;
1114 struct pci_dev *pdev = adapter->pdev;
1115
1116 buffer->skb = dev_alloc_skb(rds_ring->skb_size);
1117 if (!buffer->skb)
1118 return -ENOMEM;
1119
1120 skb = buffer->skb;
1121
1122 if (!adapter->ahw.cut_through)
1123 skb_reserve(skb, 2);
1124
1125 dma = pci_map_single(pdev, skb->data,
1126 rds_ring->dma_size, PCI_DMA_FROMDEVICE);
1127
1128 if (pci_dma_mapping_error(pdev, dma)) {
1129 dev_kfree_skb_any(skb);
1130 buffer->skb = NULL;
1131 return -ENOMEM;
1132 }
1133
1134 buffer->skb = skb;
1135 buffer->dma = dma;
1136 buffer->state = QLCNIC_BUFFER_BUSY;
1137
1138 return 0;
1139}
1140
1141static struct sk_buff *qlcnic_process_rxbuf(struct qlcnic_adapter *adapter,
1142 struct qlcnic_host_rds_ring *rds_ring, u16 index, u16 cksum)
1143{
1144 struct qlcnic_rx_buffer *buffer;
1145 struct sk_buff *skb;
1146
1147 buffer = &rds_ring->rx_buf_arr[index];
1148
1149 pci_unmap_single(adapter->pdev, buffer->dma, rds_ring->dma_size,
1150 PCI_DMA_FROMDEVICE);
1151
1152 skb = buffer->skb;
1153 if (!skb)
1154 goto no_skb;
1155
1156 if (likely(adapter->rx_csum && cksum == STATUS_CKSUM_OK)) {
1157 adapter->stats.csummed++;
1158 skb->ip_summed = CHECKSUM_UNNECESSARY;
1159 } else {
1160 skb->ip_summed = CHECKSUM_NONE;
1161 }
1162
1163 skb->dev = adapter->netdev;
1164
1165 buffer->skb = NULL;
1166no_skb:
1167 buffer->state = QLCNIC_BUFFER_FREE;
1168 return skb;
1169}
1170
1171static struct qlcnic_rx_buffer *
1172qlcnic_process_rcv(struct qlcnic_adapter *adapter,
1173 struct qlcnic_host_sds_ring *sds_ring,
1174 int ring, u64 sts_data0)
1175{
1176 struct net_device *netdev = adapter->netdev;
1177 struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx;
1178 struct qlcnic_rx_buffer *buffer;
1179 struct sk_buff *skb;
1180 struct qlcnic_host_rds_ring *rds_ring;
1181 int index, length, cksum, pkt_offset;
1182
1183 if (unlikely(ring >= adapter->max_rds_rings))
1184 return NULL;
1185
1186 rds_ring = &recv_ctx->rds_rings[ring];
1187
1188 index = qlcnic_get_sts_refhandle(sts_data0);
1189 if (unlikely(index >= rds_ring->num_desc))
1190 return NULL;
1191
1192 buffer = &rds_ring->rx_buf_arr[index];
1193
1194 length = qlcnic_get_sts_totallength(sts_data0);
1195 cksum = qlcnic_get_sts_status(sts_data0);
1196 pkt_offset = qlcnic_get_sts_pkt_offset(sts_data0);
1197
1198 skb = qlcnic_process_rxbuf(adapter, rds_ring, index, cksum);
1199 if (!skb)
1200 return buffer;
1201
1202 if (length > rds_ring->skb_size)
1203 skb_put(skb, rds_ring->skb_size);
1204 else
1205 skb_put(skb, length);
1206
1207 if (pkt_offset)
1208 skb_pull(skb, pkt_offset);
1209
1210 skb->truesize = skb->len + sizeof(struct sk_buff);
1211 skb->protocol = eth_type_trans(skb, netdev);
1212
1213 napi_gro_receive(&sds_ring->napi, skb);
1214
1215 adapter->stats.rx_pkts++;
1216 adapter->stats.rxbytes += length;
1217
1218 return buffer;
1219}
1220
1221#define QLC_TCP_HDR_SIZE 20
1222#define QLC_TCP_TS_OPTION_SIZE 12
1223#define QLC_TCP_TS_HDR_SIZE (QLC_TCP_HDR_SIZE + QLC_TCP_TS_OPTION_SIZE)
1224
1225static struct qlcnic_rx_buffer *
1226qlcnic_process_lro(struct qlcnic_adapter *adapter,
1227 struct qlcnic_host_sds_ring *sds_ring,
1228 int ring, u64 sts_data0, u64 sts_data1)
1229{
1230 struct net_device *netdev = adapter->netdev;
1231 struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx;
1232 struct qlcnic_rx_buffer *buffer;
1233 struct sk_buff *skb;
1234 struct qlcnic_host_rds_ring *rds_ring;
1235 struct iphdr *iph;
1236 struct tcphdr *th;
1237 bool push, timestamp;
1238 int l2_hdr_offset, l4_hdr_offset;
1239 int index;
1240 u16 lro_length, length, data_offset;
1241 u32 seq_number;
1242
1244	if (unlikely(ring >= adapter->max_rds_rings))
1244 return NULL;
1245
1246 rds_ring = &recv_ctx->rds_rings[ring];
1247
1248 index = qlcnic_get_lro_sts_refhandle(sts_data0);
1249	if (unlikely(index >= rds_ring->num_desc))
1250 return NULL;
1251
1252 buffer = &rds_ring->rx_buf_arr[index];
1253
1254 timestamp = qlcnic_get_lro_sts_timestamp(sts_data0);
1255 lro_length = qlcnic_get_lro_sts_length(sts_data0);
1256 l2_hdr_offset = qlcnic_get_lro_sts_l2_hdr_offset(sts_data0);
1257 l4_hdr_offset = qlcnic_get_lro_sts_l4_hdr_offset(sts_data0);
1258 push = qlcnic_get_lro_sts_push_flag(sts_data0);
1259 seq_number = qlcnic_get_lro_sts_seq_number(sts_data1);
1260
1261 skb = qlcnic_process_rxbuf(adapter, rds_ring, index, STATUS_CKSUM_OK);
1262 if (!skb)
1263 return buffer;
1264
1265 if (timestamp)
1266 data_offset = l4_hdr_offset + QLC_TCP_TS_HDR_SIZE;
1267 else
1268 data_offset = l4_hdr_offset + QLC_TCP_HDR_SIZE;
1269
1270 skb_put(skb, lro_length + data_offset);
1271
1272 skb->truesize = skb->len + sizeof(struct sk_buff) + skb_headroom(skb);
1273
1274 skb_pull(skb, l2_hdr_offset);
1275 skb->protocol = eth_type_trans(skb, netdev);
1276
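	/*
	 * The adapter has already coalesced the TCP segments; patch up the
	 * IP total length and checksum, the PSH flag and the sequence
	 * number of the aggregated frame before handing it to the stack.
	 */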
1277 iph = (struct iphdr *)skb->data;
1278 th = (struct tcphdr *)(skb->data + (iph->ihl << 2));
1279
1280 length = (iph->ihl << 2) + (th->doff << 2) + lro_length;
1281 iph->tot_len = htons(length);
1282 iph->check = 0;
1283 iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);
1284 th->psh = push;
1285 th->seq = htonl(seq_number);
1286
1287 length = skb->len;
1288
1289 netif_receive_skb(skb);
1290
1291 adapter->stats.lro_pkts++;
1292 adapter->stats.rxbytes += length;
1293
1294 return buffer;
1295}
1296
1297int
1298qlcnic_process_rcv_ring(struct qlcnic_host_sds_ring *sds_ring, int max)
1299{
1300 struct qlcnic_adapter *adapter = sds_ring->adapter;
1301 struct list_head *cur;
1302 struct status_desc *desc;
1303 struct qlcnic_rx_buffer *rxbuf;
1304 u64 sts_data0, sts_data1;
1305
1306 int count = 0;
1307 int opcode, ring, desc_cnt;
1308 u32 consumer = sds_ring->consumer;
1309
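	/*
	 * Process status descriptors owned by the host.  Each consumed
	 * entry is handed back to the firmware by rewriting it with
	 * STATUS_OWNER_PHANTOM before the consumer index moves on.
	 */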
1310 while (count < max) {
1311 desc = &sds_ring->desc_head[consumer];
1312 sts_data0 = le64_to_cpu(desc->status_desc_data[0]);
1313
1314 if (!(sts_data0 & STATUS_OWNER_HOST))
1315 break;
1316
1317 desc_cnt = qlcnic_get_sts_desc_cnt(sts_data0);
1318 opcode = qlcnic_get_sts_opcode(sts_data0);
1319
1320 switch (opcode) {
1321 case QLCNIC_RXPKT_DESC:
1322 case QLCNIC_OLD_RXPKT_DESC:
1323 case QLCNIC_SYN_OFFLOAD:
1324 ring = qlcnic_get_sts_type(sts_data0);
1325 rxbuf = qlcnic_process_rcv(adapter, sds_ring,
1326 ring, sts_data0);
1327 break;
1328 case QLCNIC_LRO_DESC:
1329 ring = qlcnic_get_lro_sts_type(sts_data0);
1330 sts_data1 = le64_to_cpu(desc->status_desc_data[1]);
1331 rxbuf = qlcnic_process_lro(adapter, sds_ring,
1332 ring, sts_data0, sts_data1);
1333 break;
1334 case QLCNIC_RESPONSE_DESC:
1335 qlcnic_handle_fw_message(desc_cnt, consumer, sds_ring);
1336 default:
1337 goto skip;
1338 }
1339
1340 WARN_ON(desc_cnt > 1);
1341
1342 if (rxbuf)
1343 list_add_tail(&rxbuf->list, &sds_ring->free_list[ring]);
1344
1345skip:
1346 for (; desc_cnt > 0; desc_cnt--) {
1347 desc = &sds_ring->desc_head[consumer];
1348 desc->status_desc_data[0] =
1349 cpu_to_le64(STATUS_OWNER_PHANTOM);
1350 consumer = get_next_index(consumer, sds_ring->num_desc);
1351 }
1352 count++;
1353 }
1354
1355 for (ring = 0; ring < adapter->max_rds_rings; ring++) {
1356 struct qlcnic_host_rds_ring *rds_ring =
1357 &adapter->recv_ctx.rds_rings[ring];
1358
1359 if (!list_empty(&sds_ring->free_list[ring])) {
1360 list_for_each(cur, &sds_ring->free_list[ring]) {
1361 rxbuf = list_entry(cur,
1362 struct qlcnic_rx_buffer, list);
1363 qlcnic_alloc_rx_skb(adapter, rds_ring, rxbuf);
1364 }
1365 spin_lock(&rds_ring->lock);
1366 list_splice_tail_init(&sds_ring->free_list[ring],
1367 &rds_ring->free_list);
1368 spin_unlock(&rds_ring->lock);
1369 }
1370
1371 qlcnic_post_rx_buffers_nodb(adapter, rds_ring);
1372 }
1373
1374 if (count) {
1375 sds_ring->consumer = consumer;
1376 writel(consumer, sds_ring->crb_sts_consumer);
1377 }
1378
1379 return count;
1380}
1381
1382void
1383qlcnic_post_rx_buffers(struct qlcnic_adapter *adapter, u32 ringid,
1384 struct qlcnic_host_rds_ring *rds_ring)
1385{
1386 struct rcv_desc *pdesc;
1387 struct qlcnic_rx_buffer *buffer;
1388 int producer, count = 0;
1389 struct list_head *head;
1390
1391 producer = rds_ring->producer;
1392
1393 spin_lock(&rds_ring->lock);
1394 head = &rds_ring->free_list;
1395 while (!list_empty(head)) {
1396
1397 buffer = list_entry(head->next, struct qlcnic_rx_buffer, list);
1398
1399 if (!buffer->skb) {
1400 if (qlcnic_alloc_rx_skb(adapter, rds_ring, buffer))
1401 break;
1402 }
1403
1404 count++;
1405 list_del(&buffer->list);
1406
1407 /* make a rcv descriptor */
1408 pdesc = &rds_ring->desc_head[producer];
1409 pdesc->addr_buffer = cpu_to_le64(buffer->dma);
1410 pdesc->reference_handle = cpu_to_le16(buffer->ref_handle);
1411 pdesc->buffer_length = cpu_to_le32(rds_ring->dma_size);
1412
1413 producer = get_next_index(producer, rds_ring->num_desc);
1414 }
1415 spin_unlock(&rds_ring->lock);
1416
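	/* Point the receive producer register at the last descriptor posted. */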
1417 if (count) {
1418 rds_ring->producer = producer;
1419 writel((producer-1) & (rds_ring->num_desc-1),
1420 rds_ring->crb_rcv_producer);
1421 }
1422}
1423
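/*
 * Same as qlcnic_post_rx_buffers(), but takes the ring lock with a
 * trylock so the receive (softirq) path backs off instead of spinning
 * when the refill path already holds it.
 */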
1424static void
1425qlcnic_post_rx_buffers_nodb(struct qlcnic_adapter *adapter,
1426 struct qlcnic_host_rds_ring *rds_ring)
1427{
1428 struct rcv_desc *pdesc;
1429 struct qlcnic_rx_buffer *buffer;
1430 int producer, count = 0;
1431 struct list_head *head;
1432
1433 producer = rds_ring->producer;
1434 if (!spin_trylock(&rds_ring->lock))
1435 return;
1436
1437 head = &rds_ring->free_list;
1438 while (!list_empty(head)) {
1439
1440 buffer = list_entry(head->next, struct qlcnic_rx_buffer, list);
1441
1442 if (!buffer->skb) {
1443 if (qlcnic_alloc_rx_skb(adapter, rds_ring, buffer))
1444 break;
1445 }
1446
1447 count++;
1448 list_del(&buffer->list);
1449
1450 /* make a rcv descriptor */
1451 pdesc = &rds_ring->desc_head[producer];
1452 pdesc->reference_handle = cpu_to_le16(buffer->ref_handle);
1453 pdesc->buffer_length = cpu_to_le32(rds_ring->dma_size);
1454 pdesc->addr_buffer = cpu_to_le64(buffer->dma);
1455
1456 producer = get_next_index(producer, rds_ring->num_desc);
1457 }
1458
1459 if (count) {
1460 rds_ring->producer = producer;
1461 writel((producer - 1) & (rds_ring->num_desc - 1),
1462 rds_ring->crb_rcv_producer);
1463 }
1464 spin_unlock(&rds_ring->lock);
1465}
1466
1467static struct qlcnic_rx_buffer *
1468qlcnic_process_rcv_diag(struct qlcnic_adapter *adapter,
1469 struct qlcnic_host_sds_ring *sds_ring,
1470 int ring, u64 sts_data0)
1471{
1472 struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx;
1473 struct qlcnic_rx_buffer *buffer;
1474 struct sk_buff *skb;
1475 struct qlcnic_host_rds_ring *rds_ring;
1476 int index, length, cksum, pkt_offset;
1477
1478 if (unlikely(ring >= adapter->max_rds_rings))
1479 return NULL;
1480
1481 rds_ring = &recv_ctx->rds_rings[ring];
1482
1483 index = qlcnic_get_sts_refhandle(sts_data0);
1484 if (unlikely(index >= rds_ring->num_desc))
1485 return NULL;
1486
1487 buffer = &rds_ring->rx_buf_arr[index];
1488
1489 length = qlcnic_get_sts_totallength(sts_data0);
1490 cksum = qlcnic_get_sts_status(sts_data0);
1491 pkt_offset = qlcnic_get_sts_pkt_offset(sts_data0);
1492
1493 skb = qlcnic_process_rxbuf(adapter, rds_ring, index, cksum);
1494 if (!skb)
1495 return buffer;
1496
1497 skb_put(skb, rds_ring->skb_size);
1498
1499 if (pkt_offset)
1500 skb_pull(skb, pkt_offset);
1501
1502 skb->truesize = skb->len + sizeof(struct sk_buff);
1503
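	/*
	 * qlcnic_check_loopback_buff() is assumed to return 0 when the
	 * payload matches the loopback test pattern; diag_cnt counts
	 * such frames.
	 */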
1504 if (!qlcnic_check_loopback_buff(skb->data))
1505 adapter->diag_cnt++;
1506
1507 dev_kfree_skb_any(skb);
1508
1509 return buffer;
1510}
1511
1512void
1513qlcnic_process_rcv_ring_diag(struct qlcnic_host_sds_ring *sds_ring)
1514{
1515 struct qlcnic_adapter *adapter = sds_ring->adapter;
1516 struct status_desc *desc;
1517 struct qlcnic_rx_buffer *rxbuf;
1518 u64 sts_data0;
1519
1520 int opcode, ring, desc_cnt;
1521 u32 consumer = sds_ring->consumer;
1522
1523 desc = &sds_ring->desc_head[consumer];
1524 sts_data0 = le64_to_cpu(desc->status_desc_data[0]);
1525
1526 if (!(sts_data0 & STATUS_OWNER_HOST))
1527 return;
1528
1529 desc_cnt = qlcnic_get_sts_desc_cnt(sts_data0);
1530 opcode = qlcnic_get_sts_opcode(sts_data0);
1531
1532 ring = qlcnic_get_sts_type(sts_data0);
1533 rxbuf = qlcnic_process_rcv_diag(adapter, sds_ring,
1534 ring, sts_data0);
1535
1536 desc->status_desc_data[0] = cpu_to_le64(STATUS_OWNER_PHANTOM);
1537 consumer = get_next_index(consumer, sds_ring->num_desc);
1538
1539 sds_ring->consumer = consumer;
1540 writel(consumer, sds_ring->crb_sts_consumer);
1541}
diff --git a/drivers/net/qlcnic/qlcnic_main.c b/drivers/net/qlcnic/qlcnic_main.c
new file mode 100644
index 00000000000..665e8e56b6a
--- /dev/null
+++ b/drivers/net/qlcnic/qlcnic_main.c
@@ -0,0 +1,2720 @@
1/*
2 * Copyright (C) 2009 - QLogic Corporation.
3 * All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License
7 * as published by the Free Software Foundation; either version 2
8 * of the License, or (at your option) any later version.
9 *
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 59 Temple Place - Suite 330, Boston,
18 * MA 02111-1307, USA.
19 *
20 * The full GNU General Public License is included in this distribution
21 * in the file called "COPYING".
22 *
23 */
24
25#include <linux/vmalloc.h>
26#include <linux/interrupt.h>
27
28#include "qlcnic.h"
29
30#include <linux/dma-mapping.h>
31#include <linux/if_vlan.h>
32#include <net/ip.h>
33#include <linux/ipv6.h>
34#include <linux/inetdevice.h>
35#include <linux/sysfs.h>
36
37MODULE_DESCRIPTION("QLogic 10 GbE Converged Ethernet Driver");
38MODULE_LICENSE("GPL");
39MODULE_VERSION(QLCNIC_LINUX_VERSIONID);
40MODULE_FIRMWARE(QLCNIC_UNIFIED_ROMIMAGE_NAME);
41
42char qlcnic_driver_name[] = "qlcnic";
43static const char qlcnic_driver_string[] = "QLogic Converged Ethernet Driver v"
44 QLCNIC_LINUX_VERSIONID;
45
46static int port_mode = QLCNIC_PORT_MODE_AUTO_NEG;
47
48/* Default to restricted 1G auto-neg mode */
49static int wol_port_mode = 5;
50
51static int use_msi = 1;
52module_param(use_msi, int, 0644);
53MODULE_PARM_DESC(use_msi, "MSI interrupt (0=disabled, 1=enabled)");
54
55static int use_msi_x = 1;
56module_param(use_msi_x, int, 0644);
57MODULE_PARM_DESC(use_msi_x, "MSI-X interrupt (0=disabled, 1=enabled)");
58
59static int auto_fw_reset = AUTO_FW_RESET_ENABLED;
60module_param(auto_fw_reset, int, 0644);
61MODULE_PARM_DESC(auto_fw_reset, "Auto firmware reset (0=disabled, 1=enabled)");
62
63static int __devinit qlcnic_probe(struct pci_dev *pdev,
64 const struct pci_device_id *ent);
65static void __devexit qlcnic_remove(struct pci_dev *pdev);
66static int qlcnic_open(struct net_device *netdev);
67static int qlcnic_close(struct net_device *netdev);
68static void qlcnic_tx_timeout(struct net_device *netdev);
69static void qlcnic_tx_timeout_task(struct work_struct *work);
70static void qlcnic_attach_work(struct work_struct *work);
71static void qlcnic_fwinit_work(struct work_struct *work);
72static void qlcnic_fw_poll_work(struct work_struct *work);
73static void qlcnic_schedule_work(struct qlcnic_adapter *adapter,
74 work_func_t func, int delay);
75static void qlcnic_cancel_fw_work(struct qlcnic_adapter *adapter);
76static int qlcnic_poll(struct napi_struct *napi, int budget);
77#ifdef CONFIG_NET_POLL_CONTROLLER
78static void qlcnic_poll_controller(struct net_device *netdev);
79#endif
80
81static void qlcnic_create_sysfs_entries(struct qlcnic_adapter *adapter);
82static void qlcnic_remove_sysfs_entries(struct qlcnic_adapter *adapter);
83static void qlcnic_create_diag_entries(struct qlcnic_adapter *adapter);
84static void qlcnic_remove_diag_entries(struct qlcnic_adapter *adapter);
85
86static void qlcnic_clr_all_drv_state(struct qlcnic_adapter *adapter);
87static int qlcnic_can_start_firmware(struct qlcnic_adapter *adapter);
88
89static irqreturn_t qlcnic_tmp_intr(int irq, void *data);
90static irqreturn_t qlcnic_intr(int irq, void *data);
91static irqreturn_t qlcnic_msi_intr(int irq, void *data);
92static irqreturn_t qlcnic_msix_intr(int irq, void *data);
93
94static struct net_device_stats *qlcnic_get_stats(struct net_device *netdev);
95static void qlcnic_config_indev_addr(struct net_device *dev, unsigned long);
96
97/* PCI Device ID Table */
98#define ENTRY(device) \
99 {PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, (device)), \
100 .class = PCI_CLASS_NETWORK_ETHERNET << 8, .class_mask = ~0}
101
102#define PCI_DEVICE_ID_QLOGIC_QLE824X 0x8020
103
104static DEFINE_PCI_DEVICE_TABLE(qlcnic_pci_tbl) = {
105 ENTRY(PCI_DEVICE_ID_QLOGIC_QLE824X),
106 {0,}
107};
108
109MODULE_DEVICE_TABLE(pci, qlcnic_pci_tbl);
110
111
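/*
 * Publish the new TX producer index through the command doorbell and
 * stop the queue once the free descriptor count drops to the stop
 * threshold, so a later transmit cannot overrun the ring.
 */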
112void
113qlcnic_update_cmd_producer(struct qlcnic_adapter *adapter,
114 struct qlcnic_host_tx_ring *tx_ring)
115{
116 writel(tx_ring->producer, tx_ring->crb_cmd_producer);
117
118 if (qlcnic_tx_avail(tx_ring) <= TX_STOP_THRESH) {
119 netif_stop_queue(adapter->netdev);
120 smp_mb();
121 }
122}
123
124static const u32 msi_tgt_status[8] = {
125 ISR_INT_TARGET_STATUS, ISR_INT_TARGET_STATUS_F1,
126 ISR_INT_TARGET_STATUS_F2, ISR_INT_TARGET_STATUS_F3,
127 ISR_INT_TARGET_STATUS_F4, ISR_INT_TARGET_STATUS_F5,
128 ISR_INT_TARGET_STATUS_F6, ISR_INT_TARGET_STATUS_F7
129};
130
131static const
132struct qlcnic_legacy_intr_set legacy_intr[] = QLCNIC_LEGACY_INTR_CONFIG;
133
134static inline void qlcnic_disable_int(struct qlcnic_host_sds_ring *sds_ring)
135{
136 writel(0, sds_ring->crb_intr_mask);
137}
138
139static inline void qlcnic_enable_int(struct qlcnic_host_sds_ring *sds_ring)
140{
141 struct qlcnic_adapter *adapter = sds_ring->adapter;
142
143 writel(0x1, sds_ring->crb_intr_mask);
144
145 if (!QLCNIC_IS_MSI_FAMILY(adapter))
146 writel(0xfbff, adapter->tgt_mask_reg);
147}
148
149static int
150qlcnic_alloc_sds_rings(struct qlcnic_recv_context *recv_ctx, int count)
151{
152 int size = sizeof(struct qlcnic_host_sds_ring) * count;
153
154 recv_ctx->sds_rings = kzalloc(size, GFP_KERNEL);
155
156 return (recv_ctx->sds_rings == NULL);
157}
158
159static void
160qlcnic_free_sds_rings(struct qlcnic_recv_context *recv_ctx)
161{
162 if (recv_ctx->sds_rings != NULL)
163 kfree(recv_ctx->sds_rings);
164
165 recv_ctx->sds_rings = NULL;
166}
167
168static int
169qlcnic_napi_add(struct qlcnic_adapter *adapter, struct net_device *netdev)
170{
171 int ring;
172 struct qlcnic_host_sds_ring *sds_ring;
173 struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx;
174
175 if (qlcnic_alloc_sds_rings(recv_ctx, adapter->max_sds_rings))
176 return -ENOMEM;
177
178 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
179 sds_ring = &recv_ctx->sds_rings[ring];
180 netif_napi_add(netdev, &sds_ring->napi,
181 qlcnic_poll, QLCNIC_NETDEV_WEIGHT);
182 }
183
184 return 0;
185}
186
187static void
188qlcnic_napi_del(struct qlcnic_adapter *adapter)
189{
190 int ring;
191 struct qlcnic_host_sds_ring *sds_ring;
192 struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx;
193
194 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
195 sds_ring = &recv_ctx->sds_rings[ring];
196 netif_napi_del(&sds_ring->napi);
197 }
198
199 qlcnic_free_sds_rings(&adapter->recv_ctx);
200}
201
202static void
203qlcnic_napi_enable(struct qlcnic_adapter *adapter)
204{
205 int ring;
206 struct qlcnic_host_sds_ring *sds_ring;
207 struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx;
208
209 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
210 sds_ring = &recv_ctx->sds_rings[ring];
211 napi_enable(&sds_ring->napi);
212 qlcnic_enable_int(sds_ring);
213 }
214}
215
216static void
217qlcnic_napi_disable(struct qlcnic_adapter *adapter)
218{
219 int ring;
220 struct qlcnic_host_sds_ring *sds_ring;
221 struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx;
222
223 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
224 sds_ring = &recv_ctx->sds_rings[ring];
225 qlcnic_disable_int(sds_ring);
226 napi_synchronize(&sds_ring->napi);
227 napi_disable(&sds_ring->napi);
228 }
229}
230
231static void qlcnic_clear_stats(struct qlcnic_adapter *adapter)
232{
233 memset(&adapter->stats, 0, sizeof(adapter->stats));
234 return;
235}
236
237static int qlcnic_set_dma_mask(struct qlcnic_adapter *adapter)
238{
239 struct pci_dev *pdev = adapter->pdev;
240 u64 mask, cmask;
241
242 adapter->pci_using_dac = 0;
243
244 mask = DMA_BIT_MASK(39);
245 cmask = mask;
246
247 if (pci_set_dma_mask(pdev, mask) == 0 &&
248 pci_set_consistent_dma_mask(pdev, cmask) == 0) {
249 adapter->pci_using_dac = 1;
250 return 0;
251 }
252
253 return -EIO;
254}
255
256/* Update addressable range if firmware supports it */
257static int
258qlcnic_update_dma_mask(struct qlcnic_adapter *adapter)
259{
260 int change, shift, err;
261 u64 mask, old_mask, old_cmask;
262 struct pci_dev *pdev = adapter->pdev;
263
264 change = 0;
265
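	/*
	 * CRB_DMA_SHIFT reports how many DMA address bits the firmware
	 * supports beyond 32; values above 32 are ignored.
	 */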
266 shift = QLCRD32(adapter, CRB_DMA_SHIFT);
267 if (shift > 32)
268 return 0;
269
270 if (shift > 9)
271 change = 1;
272
273 if (change) {
274 old_mask = pdev->dma_mask;
275 old_cmask = pdev->dev.coherent_dma_mask;
276
277 mask = DMA_BIT_MASK(32+shift);
278
279 err = pci_set_dma_mask(pdev, mask);
280 if (err)
281 goto err_out;
282
283 err = pci_set_consistent_dma_mask(pdev, mask);
284 if (err)
285 goto err_out;
286 dev_info(&pdev->dev, "using %d-bit dma mask\n", 32+shift);
287 }
288
289 return 0;
290
291err_out:
292 pci_set_dma_mask(pdev, old_mask);
293 pci_set_consistent_dma_mask(pdev, old_cmask);
294 return err;
295}
296
297static void qlcnic_set_port_mode(struct qlcnic_adapter *adapter)
298{
299 u32 val, data;
300
301 val = adapter->ahw.board_type;
302 if ((val == QLCNIC_BRDTYPE_P3_HMEZ) ||
303 (val == QLCNIC_BRDTYPE_P3_XG_LOM)) {
304 if (port_mode == QLCNIC_PORT_MODE_802_3_AP) {
305 data = QLCNIC_PORT_MODE_802_3_AP;
306 QLCWR32(adapter, QLCNIC_PORT_MODE_ADDR, data);
307 } else if (port_mode == QLCNIC_PORT_MODE_XG) {
308 data = QLCNIC_PORT_MODE_XG;
309 QLCWR32(adapter, QLCNIC_PORT_MODE_ADDR, data);
310 } else if (port_mode == QLCNIC_PORT_MODE_AUTO_NEG_1G) {
311 data = QLCNIC_PORT_MODE_AUTO_NEG_1G;
312 QLCWR32(adapter, QLCNIC_PORT_MODE_ADDR, data);
313 } else if (port_mode == QLCNIC_PORT_MODE_AUTO_NEG_XG) {
314 data = QLCNIC_PORT_MODE_AUTO_NEG_XG;
315 QLCWR32(adapter, QLCNIC_PORT_MODE_ADDR, data);
316 } else {
317 data = QLCNIC_PORT_MODE_AUTO_NEG;
318 QLCWR32(adapter, QLCNIC_PORT_MODE_ADDR, data);
319 }
320
321 if ((wol_port_mode != QLCNIC_PORT_MODE_802_3_AP) &&
322 (wol_port_mode != QLCNIC_PORT_MODE_XG) &&
323 (wol_port_mode != QLCNIC_PORT_MODE_AUTO_NEG_1G) &&
324 (wol_port_mode != QLCNIC_PORT_MODE_AUTO_NEG_XG)) {
325 wol_port_mode = QLCNIC_PORT_MODE_AUTO_NEG;
326 }
327 QLCWR32(adapter, QLCNIC_WOL_PORT_MODE, wol_port_mode);
328 }
329}
330
331static void qlcnic_set_msix_bit(struct pci_dev *pdev, int enable)
332{
333 u32 control;
334 int pos;
335
336 pos = pci_find_capability(pdev, PCI_CAP_ID_MSIX);
337 if (pos) {
338 pci_read_config_dword(pdev, pos, &control);
339 if (enable)
340 control |= PCI_MSIX_FLAGS_ENABLE;
341 else
342 control = 0;
343 pci_write_config_dword(pdev, pos, control);
344 }
345}
346
347static void qlcnic_init_msix_entries(struct qlcnic_adapter *adapter, int count)
348{
349 int i;
350
351 for (i = 0; i < count; i++)
352 adapter->msix_entries[i].entry = i;
353}
354
355static int
356qlcnic_read_mac_addr(struct qlcnic_adapter *adapter)
357{
358 int i;
359 unsigned char *p;
360 u64 mac_addr;
361 struct net_device *netdev = adapter->netdev;
362 struct pci_dev *pdev = adapter->pdev;
363
364 if (qlcnic_get_mac_addr(adapter, &mac_addr) != 0)
365 return -EIO;
366
367 p = (unsigned char *)&mac_addr;
368 for (i = 0; i < 6; i++)
369 netdev->dev_addr[i] = *(p + 5 - i);
370
371 memcpy(netdev->perm_addr, netdev->dev_addr, netdev->addr_len);
372 memcpy(adapter->mac_addr, netdev->dev_addr, netdev->addr_len);
373
374 /* set station address */
375
376 if (!is_valid_ether_addr(netdev->perm_addr))
377 dev_warn(&pdev->dev, "Bad MAC address %pM.\n",
378 netdev->dev_addr);
379
380 return 0;
381}
382
383static int qlcnic_set_mac(struct net_device *netdev, void *p)
384{
385 struct qlcnic_adapter *adapter = netdev_priv(netdev);
386 struct sockaddr *addr = p;
387
388 if (!is_valid_ether_addr(addr->sa_data))
389 return -EINVAL;
390
391 if (netif_running(netdev)) {
392 netif_device_detach(netdev);
393 qlcnic_napi_disable(adapter);
394 }
395
396 memcpy(adapter->mac_addr, addr->sa_data, netdev->addr_len);
397 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
398 qlcnic_set_multi(adapter->netdev);
399
400 if (netif_running(netdev)) {
401 netif_device_attach(netdev);
402 qlcnic_napi_enable(adapter);
403 }
404 return 0;
405}
406
407static const struct net_device_ops qlcnic_netdev_ops = {
408 .ndo_open = qlcnic_open,
409 .ndo_stop = qlcnic_close,
410 .ndo_start_xmit = qlcnic_xmit_frame,
411 .ndo_get_stats = qlcnic_get_stats,
412 .ndo_validate_addr = eth_validate_addr,
413 .ndo_set_multicast_list = qlcnic_set_multi,
414 .ndo_set_mac_address = qlcnic_set_mac,
415 .ndo_change_mtu = qlcnic_change_mtu,
416 .ndo_tx_timeout = qlcnic_tx_timeout,
417#ifdef CONFIG_NET_POLL_CONTROLLER
418 .ndo_poll_controller = qlcnic_poll_controller,
419#endif
420};
421
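/*
 * Pick an interrupt scheme in order of preference: MSI-X (one SDS ring
 * per vector when RSS is supported), then MSI, then legacy INTx driven
 * through the CRB status/mask registers.
 */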
422static void
423qlcnic_setup_intr(struct qlcnic_adapter *adapter)
424{
425 const struct qlcnic_legacy_intr_set *legacy_intrp;
426 struct pci_dev *pdev = adapter->pdev;
427 int err, num_msix;
428
429 if (adapter->rss_supported) {
430 num_msix = (num_online_cpus() >= MSIX_ENTRIES_PER_ADAPTER) ?
431 MSIX_ENTRIES_PER_ADAPTER : 2;
432 } else
433 num_msix = 1;
434
435 adapter->max_sds_rings = 1;
436
437 adapter->flags &= ~(QLCNIC_MSI_ENABLED | QLCNIC_MSIX_ENABLED);
438
439 legacy_intrp = &legacy_intr[adapter->ahw.pci_func];
440
441 adapter->int_vec_bit = legacy_intrp->int_vec_bit;
442 adapter->tgt_status_reg = qlcnic_get_ioaddr(adapter,
443 legacy_intrp->tgt_status_reg);
444 adapter->tgt_mask_reg = qlcnic_get_ioaddr(adapter,
445 legacy_intrp->tgt_mask_reg);
446 adapter->isr_int_vec = qlcnic_get_ioaddr(adapter, ISR_INT_VECTOR);
447
448 adapter->crb_int_state_reg = qlcnic_get_ioaddr(adapter,
449 ISR_INT_STATE_REG);
450
451 qlcnic_set_msix_bit(pdev, 0);
452
453 if (adapter->msix_supported) {
454
455 qlcnic_init_msix_entries(adapter, num_msix);
456 err = pci_enable_msix(pdev, adapter->msix_entries, num_msix);
457 if (err == 0) {
458 adapter->flags |= QLCNIC_MSIX_ENABLED;
459 qlcnic_set_msix_bit(pdev, 1);
460
461 if (adapter->rss_supported)
462 adapter->max_sds_rings = num_msix;
463
464 dev_info(&pdev->dev, "using msi-x interrupts\n");
465 return;
466 }
467
468 if (err > 0)
469 pci_disable_msix(pdev);
470
471 /* fall through for msi */
472 }
473
474 if (use_msi && !pci_enable_msi(pdev)) {
475 adapter->flags |= QLCNIC_MSI_ENABLED;
476 adapter->tgt_status_reg = qlcnic_get_ioaddr(adapter,
477 msi_tgt_status[adapter->ahw.pci_func]);
478 dev_info(&pdev->dev, "using msi interrupts\n");
479 adapter->msix_entries[0].vector = pdev->irq;
480 return;
481 }
482
483 dev_info(&pdev->dev, "using legacy interrupts\n");
484 adapter->msix_entries[0].vector = pdev->irq;
485}
486
487static void
488qlcnic_teardown_intr(struct qlcnic_adapter *adapter)
489{
490 if (adapter->flags & QLCNIC_MSIX_ENABLED)
491 pci_disable_msix(adapter->pdev);
492 if (adapter->flags & QLCNIC_MSI_ENABLED)
493 pci_disable_msi(adapter->pdev);
494}
495
496static void
497qlcnic_cleanup_pci_map(struct qlcnic_adapter *adapter)
498{
499 if (adapter->ahw.pci_base0 != NULL)
500 iounmap(adapter->ahw.pci_base0);
501}
502
503static int
504qlcnic_setup_pci_map(struct qlcnic_adapter *adapter)
505{
506 void __iomem *mem_ptr0 = NULL;
507 resource_size_t mem_base;
508 unsigned long mem_len, pci_len0 = 0;
509
510 struct pci_dev *pdev = adapter->pdev;
511 int pci_func = adapter->ahw.pci_func;
512
513 /*
514 * Set the CRB window to invalid. If any register in window 0 is
515 * accessed it should set the window to 0 and then reset it to 1.
516 */
517 adapter->ahw.crb_win = -1;
518 adapter->ahw.ocm_win = -1;
519
520 /* remap phys address */
521 mem_base = pci_resource_start(pdev, 0); /* 0 is for BAR 0 */
522 mem_len = pci_resource_len(pdev, 0);
523
524 if (mem_len == QLCNIC_PCI_2MB_SIZE) {
525
526 mem_ptr0 = pci_ioremap_bar(pdev, 0);
527 if (mem_ptr0 == NULL) {
528 dev_err(&pdev->dev, "failed to map PCI bar 0\n");
529 return -EIO;
530 }
531 pci_len0 = mem_len;
532 } else {
533 return -EIO;
534 }
535
536 dev_info(&pdev->dev, "%dMB memory map\n", (int)(mem_len>>20));
537
538 adapter->ahw.pci_base0 = mem_ptr0;
539 adapter->ahw.pci_len0 = pci_len0;
540
541 adapter->ahw.ocm_win_crb = qlcnic_get_ioaddr(adapter,
542 QLCNIC_PCIX_PS_REG(PCIX_OCM_WINDOW_REG(pci_func)));
543
544 return 0;
545}
546
547static void get_brd_name(struct qlcnic_adapter *adapter, char *name)
548{
549 struct pci_dev *pdev = adapter->pdev;
550 int i, found = 0;
551
552 for (i = 0; i < NUM_SUPPORTED_BOARDS; ++i) {
553 if (qlcnic_boards[i].vendor == pdev->vendor &&
554 qlcnic_boards[i].device == pdev->device &&
555 qlcnic_boards[i].sub_vendor == pdev->subsystem_vendor &&
556 qlcnic_boards[i].sub_device == pdev->subsystem_device) {
557 strcpy(name, qlcnic_boards[i].short_name);
558 found = 1;
559 break;
560 }
561
562 }
563
564 if (!found)
565		strcpy(name, "Unknown");
566}
567
568static void
569qlcnic_check_options(struct qlcnic_adapter *adapter)
570{
571 u32 fw_major, fw_minor, fw_build;
572 char brd_name[QLCNIC_MAX_BOARD_NAME_LEN];
573 char serial_num[32];
574 int i, offset, val;
575 int *ptr32;
576 struct pci_dev *pdev = adapter->pdev;
577
578 adapter->driver_mismatch = 0;
579
580 ptr32 = (int *)&serial_num;
581 offset = QLCNIC_FW_SERIAL_NUM_OFFSET;
582 for (i = 0; i < 8; i++) {
583 if (qlcnic_rom_fast_read(adapter, offset, &val) == -1) {
584 dev_err(&pdev->dev, "error reading board info\n");
585 adapter->driver_mismatch = 1;
586 return;
587 }
588 ptr32[i] = cpu_to_le32(val);
589 offset += sizeof(u32);
590 }
591
592 fw_major = QLCRD32(adapter, QLCNIC_FW_VERSION_MAJOR);
593 fw_minor = QLCRD32(adapter, QLCNIC_FW_VERSION_MINOR);
594 fw_build = QLCRD32(adapter, QLCNIC_FW_VERSION_SUB);
595
596 adapter->fw_version = QLCNIC_VERSION_CODE(fw_major, fw_minor, fw_build);
597
598 if (adapter->portnum == 0) {
599 get_brd_name(adapter, brd_name);
600
601 pr_info("%s: %s Board Chip rev 0x%x\n",
602 module_name(THIS_MODULE),
603 brd_name, adapter->ahw.revision_id);
604 }
605
606 if (adapter->fw_version < QLCNIC_VERSION_CODE(3, 4, 216)) {
607 adapter->driver_mismatch = 1;
608 dev_warn(&pdev->dev, "firmware version %d.%d.%d unsupported\n",
609 fw_major, fw_minor, fw_build);
610 return;
611 }
612
613 i = QLCRD32(adapter, QLCNIC_SRE_MISC);
614 adapter->ahw.cut_through = (i & 0x8000) ? 1 : 0;
615
616 dev_info(&pdev->dev, "firmware v%d.%d.%d [%s]\n",
617 fw_major, fw_minor, fw_build,
618 adapter->ahw.cut_through ? "cut-through" : "legacy");
619
620 if (adapter->fw_version >= QLCNIC_VERSION_CODE(4, 0, 222))
621 adapter->capabilities = QLCRD32(adapter, CRB_FW_CAPABILITIES_1);
622
623 adapter->flags &= ~QLCNIC_LRO_ENABLED;
624
625 if (adapter->ahw.port_type == QLCNIC_XGBE) {
626 adapter->num_rxd = DEFAULT_RCV_DESCRIPTORS_10G;
627 adapter->num_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_10G;
628 } else if (adapter->ahw.port_type == QLCNIC_GBE) {
629 adapter->num_rxd = DEFAULT_RCV_DESCRIPTORS_1G;
630 adapter->num_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_1G;
631 }
632
633 adapter->msix_supported = !!use_msi_x;
634 adapter->rss_supported = !!use_msi_x;
635
636 adapter->num_txd = MAX_CMD_DESCRIPTORS;
637
638 adapter->num_lro_rxd = 0;
639 adapter->max_rds_rings = 2;
640}
641
642static int
643qlcnic_start_firmware(struct qlcnic_adapter *adapter)
644{
645 int val, err, first_boot;
646
647 err = qlcnic_set_dma_mask(adapter);
648 if (err)
649 return err;
650
651 if (!qlcnic_can_start_firmware(adapter))
652 goto wait_init;
653
654 first_boot = QLCRD32(adapter, QLCNIC_CAM_RAM(0x1fc));
655 if (first_boot == 0x55555555)
656 /* This is the first boot after power up */
657 QLCWR32(adapter, QLCNIC_CAM_RAM(0x1fc), QLCNIC_BDINFO_MAGIC);
658
659 qlcnic_request_firmware(adapter);
660
661 err = qlcnic_need_fw_reset(adapter);
662 if (err < 0)
663 goto err_out;
664 if (err == 0)
665 goto wait_init;
666
667 if (first_boot != 0x55555555) {
668 QLCWR32(adapter, CRB_CMDPEG_STATE, 0);
669 qlcnic_pinit_from_rom(adapter);
670 msleep(1);
671 }
672
673 QLCWR32(adapter, CRB_DMA_SHIFT, 0x55555555);
674 QLCWR32(adapter, QLCNIC_PEG_HALT_STATUS1, 0);
675 QLCWR32(adapter, QLCNIC_PEG_HALT_STATUS2, 0);
676
677 qlcnic_set_port_mode(adapter);
678
679 err = qlcnic_load_firmware(adapter);
680 if (err)
681 goto err_out;
682
683 qlcnic_release_firmware(adapter);
684
685 val = (_QLCNIC_LINUX_MAJOR << 16)
686 | ((_QLCNIC_LINUX_MINOR << 8))
687 | (_QLCNIC_LINUX_SUBVERSION);
688 QLCWR32(adapter, CRB_DRIVER_VERSION, val);
689
690wait_init:
691 /* Handshake with the card before we register the devices. */
692 err = qlcnic_phantom_init(adapter);
693 if (err)
694 goto err_out;
695
696 QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_READY);
697
698 qlcnic_update_dma_mask(adapter);
699
700 qlcnic_check_options(adapter);
701
702 adapter->need_fw_reset = 0;
703
704 /* fall through and release firmware */
705
706err_out:
707 qlcnic_release_firmware(adapter);
708 return err;
709}
710
711static int
712qlcnic_request_irq(struct qlcnic_adapter *adapter)
713{
714 irq_handler_t handler;
715 struct qlcnic_host_sds_ring *sds_ring;
716 int err, ring;
717
718 unsigned long flags = 0;
719 struct net_device *netdev = adapter->netdev;
720 struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx;
721
722 if (adapter->diag_test == QLCNIC_INTERRUPT_TEST) {
723 handler = qlcnic_tmp_intr;
724 if (!QLCNIC_IS_MSI_FAMILY(adapter))
725 flags |= IRQF_SHARED;
726
727 } else {
728 if (adapter->flags & QLCNIC_MSIX_ENABLED)
729 handler = qlcnic_msix_intr;
730 else if (adapter->flags & QLCNIC_MSI_ENABLED)
731 handler = qlcnic_msi_intr;
732 else {
733 flags |= IRQF_SHARED;
734 handler = qlcnic_intr;
735 }
736 }
737 adapter->irq = netdev->irq;
738
739 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
740 sds_ring = &recv_ctx->sds_rings[ring];
741 sprintf(sds_ring->name, "%s[%d]", netdev->name, ring);
742 err = request_irq(sds_ring->irq, handler,
743 flags, sds_ring->name, sds_ring);
744 if (err)
745 return err;
746 }
747
748 return 0;
749}
750
751static void
752qlcnic_free_irq(struct qlcnic_adapter *adapter)
753{
754 int ring;
755 struct qlcnic_host_sds_ring *sds_ring;
756
757 struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx;
758
759 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
760 sds_ring = &recv_ctx->sds_rings[ring];
761 free_irq(sds_ring->irq, sds_ring);
762 }
763}
764
765static void
766qlcnic_init_coalesce_defaults(struct qlcnic_adapter *adapter)
767{
768 adapter->coal.flags = QLCNIC_INTR_DEFAULT;
769 adapter->coal.normal.data.rx_time_us =
770 QLCNIC_DEFAULT_INTR_COALESCE_RX_TIME_US;
771 adapter->coal.normal.data.rx_packets =
772 QLCNIC_DEFAULT_INTR_COALESCE_RX_PACKETS;
773 adapter->coal.normal.data.tx_time_us =
774 QLCNIC_DEFAULT_INTR_COALESCE_TX_TIME_US;
775 adapter->coal.normal.data.tx_packets =
776 QLCNIC_DEFAULT_INTR_COALESCE_TX_PACKETS;
777}
778
779static int
780__qlcnic_up(struct qlcnic_adapter *adapter, struct net_device *netdev)
781{
782 if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
783 return -EIO;
784
785 qlcnic_set_multi(netdev);
786 qlcnic_fw_cmd_set_mtu(adapter, netdev->mtu);
787
788 adapter->ahw.linkup = 0;
789
790 if (adapter->max_sds_rings > 1)
791 qlcnic_config_rss(adapter, 1);
792
793 qlcnic_config_intr_coalesce(adapter);
794
795 if (adapter->capabilities & QLCNIC_FW_CAPABILITY_HW_LRO)
796 qlcnic_config_hw_lro(adapter, QLCNIC_LRO_ENABLED);
797
798 qlcnic_napi_enable(adapter);
799
800 qlcnic_linkevent_request(adapter, 1);
801
802 set_bit(__QLCNIC_DEV_UP, &adapter->state);
803 return 0;
804}
805
806/* Used during resume and by the firmware recovery path. */
807
808static int
809qlcnic_up(struct qlcnic_adapter *adapter, struct net_device *netdev)
810{
811 int err = 0;
812
813 rtnl_lock();
814 if (netif_running(netdev))
815 err = __qlcnic_up(adapter, netdev);
816 rtnl_unlock();
817
818 return err;
819}
820
821static void
822__qlcnic_down(struct qlcnic_adapter *adapter, struct net_device *netdev)
823{
824 if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
825 return;
826
827 if (!test_and_clear_bit(__QLCNIC_DEV_UP, &adapter->state))
828 return;
829
830 smp_mb();
831 spin_lock(&adapter->tx_clean_lock);
832 netif_carrier_off(netdev);
833 netif_tx_disable(netdev);
834
835 qlcnic_free_mac_list(adapter);
836
837 qlcnic_nic_set_promisc(adapter, QLCNIC_NIU_NON_PROMISC_MODE);
838
839 qlcnic_napi_disable(adapter);
840
841 qlcnic_release_tx_buffers(adapter);
842 spin_unlock(&adapter->tx_clean_lock);
843}
844
845/* Used during suspend and by the firmware recovery path. */
846
847static void
848qlcnic_down(struct qlcnic_adapter *adapter, struct net_device *netdev)
849{
850 rtnl_lock();
851 if (netif_running(netdev))
852 __qlcnic_down(adapter, netdev);
853 rtnl_unlock();
854
855}
856
857static int
858qlcnic_attach(struct qlcnic_adapter *adapter)
859{
860 struct net_device *netdev = adapter->netdev;
861 struct pci_dev *pdev = adapter->pdev;
862 int err, ring;
863 struct qlcnic_host_rds_ring *rds_ring;
864
865 if (adapter->is_up == QLCNIC_ADAPTER_UP_MAGIC)
866 return 0;
867
868 err = qlcnic_init_firmware(adapter);
869 if (err)
870 return err;
871
872 err = qlcnic_napi_add(adapter, netdev);
873 if (err)
874 return err;
875
876 err = qlcnic_alloc_sw_resources(adapter);
877 if (err) {
878 dev_err(&pdev->dev, "Error in setting sw resources\n");
879 return err;
880 }
881
882 err = qlcnic_alloc_hw_resources(adapter);
883 if (err) {
884 dev_err(&pdev->dev, "Error in setting hw resources\n");
885 goto err_out_free_sw;
886 }
887
888
889 for (ring = 0; ring < adapter->max_rds_rings; ring++) {
890 rds_ring = &adapter->recv_ctx.rds_rings[ring];
891 qlcnic_post_rx_buffers(adapter, ring, rds_ring);
892 }
893
894 err = qlcnic_request_irq(adapter);
895 if (err) {
896 dev_err(&pdev->dev, "failed to setup interrupt\n");
897 goto err_out_free_rxbuf;
898 }
899
900 qlcnic_init_coalesce_defaults(adapter);
901
902 qlcnic_create_sysfs_entries(adapter);
903
904 adapter->is_up = QLCNIC_ADAPTER_UP_MAGIC;
905 return 0;
906
907err_out_free_rxbuf:
908 qlcnic_release_rx_buffers(adapter);
909 qlcnic_free_hw_resources(adapter);
910err_out_free_sw:
911 qlcnic_free_sw_resources(adapter);
912 return err;
913}
914
915static void
916qlcnic_detach(struct qlcnic_adapter *adapter)
917{
918 if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
919 return;
920
921 qlcnic_remove_sysfs_entries(adapter);
922
923 qlcnic_free_hw_resources(adapter);
924 qlcnic_release_rx_buffers(adapter);
925 qlcnic_free_irq(adapter);
926 qlcnic_napi_del(adapter);
927 qlcnic_free_sw_resources(adapter);
928
929 adapter->is_up = 0;
930}
931
932void qlcnic_diag_free_res(struct net_device *netdev, int max_sds_rings)
933{
934 struct qlcnic_adapter *adapter = netdev_priv(netdev);
935 struct qlcnic_host_sds_ring *sds_ring;
936 int ring;
937
938 if (adapter->diag_test == QLCNIC_INTERRUPT_TEST) {
939 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
940 sds_ring = &adapter->recv_ctx.sds_rings[ring];
941 qlcnic_disable_int(sds_ring);
942 }
943 }
944
945 qlcnic_detach(adapter);
946
947 adapter->diag_test = 0;
948 adapter->max_sds_rings = max_sds_rings;
949
950 if (qlcnic_attach(adapter))
951 return;
952
953 if (netif_running(netdev))
954 __qlcnic_up(adapter, netdev);
955
956 netif_device_attach(netdev);
957}
958
959int qlcnic_diag_alloc_res(struct net_device *netdev, int test)
960{
961 struct qlcnic_adapter *adapter = netdev_priv(netdev);
962 struct qlcnic_host_sds_ring *sds_ring;
963 int ring;
964 int ret;
965
966 netif_device_detach(netdev);
967
968 if (netif_running(netdev))
969 __qlcnic_down(adapter, netdev);
970
971 qlcnic_detach(adapter);
972
973 adapter->max_sds_rings = 1;
974 adapter->diag_test = test;
975
976 ret = qlcnic_attach(adapter);
977 if (ret)
978 return ret;
979
980 if (adapter->diag_test == QLCNIC_INTERRUPT_TEST) {
981 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
982 sds_ring = &adapter->recv_ctx.sds_rings[ring];
983 qlcnic_enable_int(sds_ring);
984 }
985 }
986
987 return 0;
988}
989
990int
991qlcnic_reset_context(struct qlcnic_adapter *adapter)
992{
993 int err = 0;
994 struct net_device *netdev = adapter->netdev;
995
996 if (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state))
997 return -EBUSY;
998
999 if (adapter->is_up == QLCNIC_ADAPTER_UP_MAGIC) {
1000
1001 netif_device_detach(netdev);
1002
1003 if (netif_running(netdev))
1004 __qlcnic_down(adapter, netdev);
1005
1006 qlcnic_detach(adapter);
1007
1008 if (netif_running(netdev)) {
1009 err = qlcnic_attach(adapter);
1010 if (!err)
1011 err = __qlcnic_up(adapter, netdev);
1012
1013 if (err)
1014 goto done;
1015 }
1016
1017 netif_device_attach(netdev);
1018 }
1019
1020done:
1021 clear_bit(__QLCNIC_RESETTING, &adapter->state);
1022 return err;
1023}
1024
1025static int
1026qlcnic_setup_netdev(struct qlcnic_adapter *adapter,
1027 struct net_device *netdev)
1028{
1029 int err;
1030 struct pci_dev *pdev = adapter->pdev;
1031
1032 adapter->rx_csum = 1;
1033 adapter->mc_enabled = 0;
1034 adapter->max_mc_count = 38;
1035
1036 netdev->netdev_ops = &qlcnic_netdev_ops;
1037 netdev->watchdog_timeo = 2*HZ;
1038
1039 qlcnic_change_mtu(netdev, netdev->mtu);
1040
1041 SET_ETHTOOL_OPS(netdev, &qlcnic_ethtool_ops);
1042
1043 netdev->features |= (NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO);
1044 netdev->features |= (NETIF_F_GRO);
1045 netdev->vlan_features |= (NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO);
1046
1047 netdev->features |= (NETIF_F_IPV6_CSUM | NETIF_F_TSO6);
1048 netdev->vlan_features |= (NETIF_F_IPV6_CSUM | NETIF_F_TSO6);
1049
1050 if (adapter->pci_using_dac) {
1051 netdev->features |= NETIF_F_HIGHDMA;
1052 netdev->vlan_features |= NETIF_F_HIGHDMA;
1053 }
1054
1055 if (adapter->capabilities & QLCNIC_FW_CAPABILITY_FVLANTX)
1056 netdev->features |= (NETIF_F_HW_VLAN_TX);
1057
1058 if (adapter->capabilities & QLCNIC_FW_CAPABILITY_HW_LRO)
1059 netdev->features |= NETIF_F_LRO;
1060
1061 netdev->irq = adapter->msix_entries[0].vector;
1062
1063 INIT_WORK(&adapter->tx_timeout_task, qlcnic_tx_timeout_task);
1064
1065 if (qlcnic_read_mac_addr(adapter))
1066 dev_warn(&pdev->dev, "failed to read mac addr\n");
1067
1068 netif_carrier_off(netdev);
1069 netif_stop_queue(netdev);
1070
1071 err = register_netdev(netdev);
1072 if (err) {
1073 dev_err(&pdev->dev, "failed to register net device\n");
1074 return err;
1075 }
1076
1077 return 0;
1078}
1079
1080static int __devinit
1081qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1082{
1083 struct net_device *netdev = NULL;
1084 struct qlcnic_adapter *adapter = NULL;
1085 int err;
1086 int pci_func_id = PCI_FUNC(pdev->devfn);
1087 uint8_t revision_id;
1088
1089 err = pci_enable_device(pdev);
1090 if (err)
1091 return err;
1092
1093 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
1094 err = -ENODEV;
1095 goto err_out_disable_pdev;
1096 }
1097
1098 err = pci_request_regions(pdev, qlcnic_driver_name);
1099 if (err)
1100 goto err_out_disable_pdev;
1101
1102 pci_set_master(pdev);
1103
1104 netdev = alloc_etherdev(sizeof(struct qlcnic_adapter));
1105 if (!netdev) {
1106 dev_err(&pdev->dev, "failed to allocate net_device\n");
1107 err = -ENOMEM;
1108 goto err_out_free_res;
1109 }
1110
1111 SET_NETDEV_DEV(netdev, &pdev->dev);
1112
1113 adapter = netdev_priv(netdev);
1114 adapter->netdev = netdev;
1115 adapter->pdev = pdev;
1116 adapter->ahw.pci_func = pci_func_id;
1117
1118 revision_id = pdev->revision;
1119 adapter->ahw.revision_id = revision_id;
1120
1121 rwlock_init(&adapter->ahw.crb_lock);
1122 mutex_init(&adapter->ahw.mem_lock);
1123
1124 spin_lock_init(&adapter->tx_clean_lock);
1125 INIT_LIST_HEAD(&adapter->mac_list);
1126
1127 err = qlcnic_setup_pci_map(adapter);
1128 if (err)
1129 goto err_out_free_netdev;
1130
1131 /* This will be reset for mezz cards */
1132 adapter->portnum = pci_func_id;
1133
1134 err = qlcnic_get_board_info(adapter);
1135 if (err) {
1136 dev_err(&pdev->dev, "Error getting board config info.\n");
1137 goto err_out_iounmap;
1138 }
1139
1140
1141 err = qlcnic_start_firmware(adapter);
1142 if (err)
1143 goto err_out_decr_ref;
1144
1145 /*
1146 * See if the firmware gave us a virtual-physical port mapping.
1147 */
1148 adapter->physical_port = adapter->portnum;
1149
1150 qlcnic_clear_stats(adapter);
1151
1152 qlcnic_setup_intr(adapter);
1153
1154 err = qlcnic_setup_netdev(adapter, netdev);
1155 if (err)
1156 goto err_out_disable_msi;
1157
1158 pci_set_drvdata(pdev, adapter);
1159
1160 qlcnic_schedule_work(adapter, qlcnic_fw_poll_work, FW_POLL_DELAY);
1161
1162 switch (adapter->ahw.port_type) {
1163 case QLCNIC_GBE:
1164 dev_info(&adapter->pdev->dev, "%s: GbE port initialized\n",
1165 adapter->netdev->name);
1166 break;
1167 case QLCNIC_XGBE:
1168 dev_info(&adapter->pdev->dev, "%s: XGbE port initialized\n",
1169 adapter->netdev->name);
1170 break;
1171 }
1172
1173 qlcnic_create_diag_entries(adapter);
1174
1175 return 0;
1176
1177err_out_disable_msi:
1178 qlcnic_teardown_intr(adapter);
1179
1180err_out_decr_ref:
1181 qlcnic_clr_all_drv_state(adapter);
1182
1183err_out_iounmap:
1184 qlcnic_cleanup_pci_map(adapter);
1185
1186err_out_free_netdev:
1187 free_netdev(netdev);
1188
1189err_out_free_res:
1190 pci_release_regions(pdev);
1191
1192err_out_disable_pdev:
1193 pci_set_drvdata(pdev, NULL);
1194 pci_disable_device(pdev);
1195 return err;
1196}
1197
1198static void __devexit qlcnic_remove(struct pci_dev *pdev)
1199{
1200 struct qlcnic_adapter *adapter;
1201 struct net_device *netdev;
1202
1203 adapter = pci_get_drvdata(pdev);
1204 if (adapter == NULL)
1205 return;
1206
1207 netdev = adapter->netdev;
1208
1209 qlcnic_cancel_fw_work(adapter);
1210
1211 unregister_netdev(netdev);
1212
1213 cancel_work_sync(&adapter->tx_timeout_task);
1214
1215 qlcnic_detach(adapter);
1216
1217 qlcnic_clr_all_drv_state(adapter);
1218
1219 clear_bit(__QLCNIC_RESETTING, &adapter->state);
1220
1221 qlcnic_teardown_intr(adapter);
1222
1223 qlcnic_remove_diag_entries(adapter);
1224
1225 qlcnic_cleanup_pci_map(adapter);
1226
1227 qlcnic_release_firmware(adapter);
1228
1229 pci_release_regions(pdev);
1230 pci_disable_device(pdev);
1231 pci_set_drvdata(pdev, NULL);
1232
1233 free_netdev(netdev);
1234}
1235static int __qlcnic_shutdown(struct pci_dev *pdev)
1236{
1237 struct qlcnic_adapter *adapter = pci_get_drvdata(pdev);
1238 struct net_device *netdev = adapter->netdev;
1239 int retval;
1240
1241 netif_device_detach(netdev);
1242
1243 qlcnic_cancel_fw_work(adapter);
1244
1245 if (netif_running(netdev))
1246 qlcnic_down(adapter, netdev);
1247
1248 cancel_work_sync(&adapter->tx_timeout_task);
1249
1250 qlcnic_detach(adapter);
1251
1252 qlcnic_clr_all_drv_state(adapter);
1253
1254 clear_bit(__QLCNIC_RESETTING, &adapter->state);
1255
1256 retval = pci_save_state(pdev);
1257 if (retval)
1258 return retval;
1259
1260 if (qlcnic_wol_supported(adapter)) {
1261 pci_enable_wake(pdev, PCI_D3cold, 1);
1262 pci_enable_wake(pdev, PCI_D3hot, 1);
1263 }
1264
1265 return 0;
1266}
1267
1268static void qlcnic_shutdown(struct pci_dev *pdev)
1269{
1270 if (__qlcnic_shutdown(pdev))
1271 return;
1272
1273 pci_disable_device(pdev);
1274}
1275
1276#ifdef CONFIG_PM
1277static int
1278qlcnic_suspend(struct pci_dev *pdev, pm_message_t state)
1279{
1280 int retval;
1281
1282 retval = __qlcnic_shutdown(pdev);
1283 if (retval)
1284 return retval;
1285
1286 pci_set_power_state(pdev, pci_choose_state(pdev, state));
1287 return 0;
1288}
1289
1290static int
1291qlcnic_resume(struct pci_dev *pdev)
1292{
1293 struct qlcnic_adapter *adapter = pci_get_drvdata(pdev);
1294 struct net_device *netdev = adapter->netdev;
1295 int err;
1296
1297 err = pci_enable_device(pdev);
1298 if (err)
1299 return err;
1300
1301 pci_set_power_state(pdev, PCI_D0);
1302 pci_set_master(pdev);
1303 pci_restore_state(pdev);
1304
1305 adapter->ahw.crb_win = -1;
1306 adapter->ahw.ocm_win = -1;
1307
1308 err = qlcnic_start_firmware(adapter);
1309 if (err) {
1310 dev_err(&pdev->dev, "failed to start firmware\n");
1311 return err;
1312 }
1313
1314 if (netif_running(netdev)) {
1315 err = qlcnic_attach(adapter);
1316 if (err)
1317 goto err_out;
1318
1319 err = qlcnic_up(adapter, netdev);
1320 if (err)
1321 goto err_out_detach;
1322
1323
1324 qlcnic_config_indev_addr(netdev, NETDEV_UP);
1325 }
1326
1327 netif_device_attach(netdev);
1328 qlcnic_schedule_work(adapter, qlcnic_fw_poll_work, FW_POLL_DELAY);
1329 return 0;
1330
1331err_out_detach:
1332 qlcnic_detach(adapter);
1333err_out:
1334 qlcnic_clr_all_drv_state(adapter);
1335 return err;
1336}
1337#endif
1338
1339static int qlcnic_open(struct net_device *netdev)
1340{
1341 struct qlcnic_adapter *adapter = netdev_priv(netdev);
1342 int err;
1343
1344 if (adapter->driver_mismatch)
1345 return -EIO;
1346
1347 err = qlcnic_attach(adapter);
1348 if (err)
1349 return err;
1350
1351 err = __qlcnic_up(adapter, netdev);
1352 if (err)
1353 goto err_out;
1354
1355 netif_start_queue(netdev);
1356
1357 return 0;
1358
1359err_out:
1360 qlcnic_detach(adapter);
1361 return err;
1362}
1363
1364/*
1365 * qlcnic_close - Disables a network interface entry point
1366 */
1367static int qlcnic_close(struct net_device *netdev)
1368{
1369 struct qlcnic_adapter *adapter = netdev_priv(netdev);
1370
1371 __qlcnic_down(adapter, netdev);
1372 return 0;
1373}
1374
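/*
 * Fill in the offload fields of the first TX descriptor: VLAN tagging,
 * LSO/LSO6 or TCP/UDP checksum opcodes, and for TSO copy the
 * MAC/IP/TCP headers into the descriptor ring for the firmware.
 */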
1375static void
1376qlcnic_tso_check(struct net_device *netdev,
1377 struct qlcnic_host_tx_ring *tx_ring,
1378 struct cmd_desc_type0 *first_desc,
1379 struct sk_buff *skb)
1380{
1381 u8 opcode = TX_ETHER_PKT;
1382 __be16 protocol = skb->protocol;
1383 u16 flags = 0, vid = 0;
1384 u32 producer;
1385 int copied, offset, copy_len, hdr_len = 0, tso = 0, vlan_oob = 0;
1386 struct cmd_desc_type0 *hwdesc;
1387 struct vlan_ethhdr *vh;
1388
1389 if (protocol == cpu_to_be16(ETH_P_8021Q)) {
1390
1391 vh = (struct vlan_ethhdr *)skb->data;
1392 protocol = vh->h_vlan_encapsulated_proto;
1393 flags = FLAGS_VLAN_TAGGED;
1394
1395 } else if (vlan_tx_tag_present(skb)) {
1396
1397 flags = FLAGS_VLAN_OOB;
1398 vid = vlan_tx_tag_get(skb);
1399 qlcnic_set_tx_vlan_tci(first_desc, vid);
1400 vlan_oob = 1;
1401 }
1402
1403 if ((netdev->features & (NETIF_F_TSO | NETIF_F_TSO6)) &&
1404 skb_shinfo(skb)->gso_size > 0) {
1405
1406 hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
1407
1408 first_desc->mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
1409 first_desc->total_hdr_length = hdr_len;
1410 if (vlan_oob) {
1411 first_desc->total_hdr_length += VLAN_HLEN;
1412 first_desc->tcp_hdr_offset = VLAN_HLEN;
1413 first_desc->ip_hdr_offset = VLAN_HLEN;
1414 /* Only in case of TSO on vlan device */
1415 flags |= FLAGS_VLAN_TAGGED;
1416 }
1417
1418 opcode = (protocol == cpu_to_be16(ETH_P_IPV6)) ?
1419 TX_TCP_LSO6 : TX_TCP_LSO;
1420 tso = 1;
1421
1422 } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
1423 u8 l4proto;
1424
1425 if (protocol == cpu_to_be16(ETH_P_IP)) {
1426 l4proto = ip_hdr(skb)->protocol;
1427
1428 if (l4proto == IPPROTO_TCP)
1429 opcode = TX_TCP_PKT;
1430 else if (l4proto == IPPROTO_UDP)
1431 opcode = TX_UDP_PKT;
1432 } else if (protocol == cpu_to_be16(ETH_P_IPV6)) {
1433 l4proto = ipv6_hdr(skb)->nexthdr;
1434
1435 if (l4proto == IPPROTO_TCP)
1436 opcode = TX_TCPV6_PKT;
1437 else if (l4proto == IPPROTO_UDP)
1438 opcode = TX_UDPV6_PKT;
1439 }
1440 }
1441
1442 first_desc->tcp_hdr_offset += skb_transport_offset(skb);
1443 first_desc->ip_hdr_offset += skb_network_offset(skb);
1444 qlcnic_set_tx_flags_opcode(first_desc, flags, opcode);
1445
1446 if (!tso)
1447 return;
1448
1449 /* For LSO, we need to copy the MAC/IP/TCP headers into
1450 * the descriptor ring
1451 */
1452 producer = tx_ring->producer;
1453 copied = 0;
1454 offset = 2;
1455
1456 if (vlan_oob) {
1457 /* Create a TSO vlan header template for firmware */
1458
1459 hwdesc = &tx_ring->desc_head[producer];
1460 tx_ring->cmd_buf_arr[producer].skb = NULL;
1461
1462 copy_len = min((int)sizeof(struct cmd_desc_type0) - offset,
1463 hdr_len + VLAN_HLEN);
1464
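		/*
		 * Copy the 12 bytes of MAC addresses, insert the 802.1Q tag,
		 * then copy the remaining headers past the 4-byte VLAN field.
		 */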
1465 vh = (struct vlan_ethhdr *)((char *)hwdesc + 2);
1466 skb_copy_from_linear_data(skb, vh, 12);
1467 vh->h_vlan_proto = htons(ETH_P_8021Q);
1468 vh->h_vlan_TCI = htons(vid);
1469 skb_copy_from_linear_data_offset(skb, 12,
1470 (char *)vh + 16, copy_len - 16);
1471
1472 copied = copy_len - VLAN_HLEN;
1473 offset = 0;
1474
1475 producer = get_next_index(producer, tx_ring->num_desc);
1476 }
1477
1478 while (copied < hdr_len) {
1479
1480 copy_len = min((int)sizeof(struct cmd_desc_type0) - offset,
1481 (hdr_len - copied));
1482
1483 hwdesc = &tx_ring->desc_head[producer];
1484 tx_ring->cmd_buf_arr[producer].skb = NULL;
1485
1486 skb_copy_from_linear_data_offset(skb, copied,
1487 (char *)hwdesc + offset, copy_len);
1488
1489 copied += copy_len;
1490 offset = 0;
1491
1492 producer = get_next_index(producer, tx_ring->num_desc);
1493 }
1494
1495 tx_ring->producer = producer;
1496 barrier();
1497}
1498
1499static int
1500qlcnic_map_tx_skb(struct pci_dev *pdev,
1501 struct sk_buff *skb, struct qlcnic_cmd_buffer *pbuf)
1502{
1503 struct qlcnic_skb_frag *nf;
1504 struct skb_frag_struct *frag;
1505 int i, nr_frags;
1506 dma_addr_t map;
1507
1508 nr_frags = skb_shinfo(skb)->nr_frags;
1509 nf = &pbuf->frag_array[0];
1510
1511 map = pci_map_single(pdev, skb->data,
1512 skb_headlen(skb), PCI_DMA_TODEVICE);
1513 if (pci_dma_mapping_error(pdev, map))
1514 goto out_err;
1515
1516 nf->dma = map;
1517 nf->length = skb_headlen(skb);
1518
1519 for (i = 0; i < nr_frags; i++) {
1520 frag = &skb_shinfo(skb)->frags[i];
1521 nf = &pbuf->frag_array[i+1];
1522
1523 map = pci_map_page(pdev, frag->page, frag->page_offset,
1524 frag->size, PCI_DMA_TODEVICE);
1525 if (pci_dma_mapping_error(pdev, map))
1526 goto unwind;
1527
1528 nf->dma = map;
1529 nf->length = frag->size;
1530 }
1531
1532 return 0;
1533
1534unwind:
1535 while (--i >= 0) {
1536 nf = &pbuf->frag_array[i+1];
1537 pci_unmap_page(pdev, nf->dma, nf->length, PCI_DMA_TODEVICE);
1538 }
1539
1540 nf = &pbuf->frag_array[0];
1541 pci_unmap_single(pdev, nf->dma, skb_headlen(skb), PCI_DMA_TODEVICE);
1542
1543out_err:
1544 return -ENOMEM;
1545}
1546
1547static inline void
1548qlcnic_clear_cmddesc(u64 *desc)
1549{
1550 desc[0] = 0ULL;
1551 desc[2] = 0ULL;
1552}
1553
1554netdev_tx_t
1555qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
1556{
1557 struct qlcnic_adapter *adapter = netdev_priv(netdev);
1558 struct qlcnic_host_tx_ring *tx_ring = adapter->tx_ring;
1559 struct qlcnic_cmd_buffer *pbuf;
1560 struct qlcnic_skb_frag *buffrag;
1561 struct cmd_desc_type0 *hwdesc, *first_desc;
1562 struct pci_dev *pdev;
1563 int i, k;
1564
1565 u32 producer;
1566 int frag_count, no_of_desc;
1567 u32 num_txd = tx_ring->num_desc;
1568
1569 frag_count = skb_shinfo(skb)->nr_frags + 1;
1570
1571	/* 4 fragments per cmd descriptor */
1572 no_of_desc = (frag_count + 3) >> 2;
1573
1574 if (unlikely(no_of_desc + 2 > qlcnic_tx_avail(tx_ring))) {
1575 netif_stop_queue(netdev);
1576 return NETDEV_TX_BUSY;
1577 }
1578
1579 producer = tx_ring->producer;
1580 pbuf = &tx_ring->cmd_buf_arr[producer];
1581
1582 pdev = adapter->pdev;
1583
1584 if (qlcnic_map_tx_skb(pdev, skb, pbuf))
1585 goto drop_packet;
1586
1587 pbuf->skb = skb;
1588 pbuf->frag_count = frag_count;
1589
1590 first_desc = hwdesc = &tx_ring->desc_head[producer];
1591 qlcnic_clear_cmddesc((u64 *)hwdesc);
1592
1593 qlcnic_set_tx_frags_len(first_desc, frag_count, skb->len);
1594 qlcnic_set_tx_port(first_desc, adapter->portnum);
1595
1596 for (i = 0; i < frag_count; i++) {
1597
1598 k = i % 4;
1599
1600 if ((k == 0) && (i > 0)) {
1601 /* move to next desc.*/
1602 producer = get_next_index(producer, num_txd);
1603 hwdesc = &tx_ring->desc_head[producer];
1604 qlcnic_clear_cmddesc((u64 *)hwdesc);
1605 tx_ring->cmd_buf_arr[producer].skb = NULL;
1606 }
1607
1608 buffrag = &pbuf->frag_array[i];
1609
1610 hwdesc->buffer_length[k] = cpu_to_le16(buffrag->length);
1611 switch (k) {
1612 case 0:
1613 hwdesc->addr_buffer1 = cpu_to_le64(buffrag->dma);
1614 break;
1615 case 1:
1616 hwdesc->addr_buffer2 = cpu_to_le64(buffrag->dma);
1617 break;
1618 case 2:
1619 hwdesc->addr_buffer3 = cpu_to_le64(buffrag->dma);
1620 break;
1621 case 3:
1622 hwdesc->addr_buffer4 = cpu_to_le64(buffrag->dma);
1623 break;
1624 }
1625 }
1626
1627 tx_ring->producer = get_next_index(producer, num_txd);
1628
1629 qlcnic_tso_check(netdev, tx_ring, first_desc, skb);
1630
1631 qlcnic_update_cmd_producer(adapter, tx_ring);
1632
1633 adapter->stats.txbytes += skb->len;
1634 adapter->stats.xmitcalled++;
1635
1636 return NETDEV_TX_OK;
1637
1638drop_packet:
1639 adapter->stats.txdropped++;
1640 dev_kfree_skb_any(skb);
1641 return NETDEV_TX_OK;
1642}
1643
1644static int qlcnic_check_temp(struct qlcnic_adapter *adapter)
1645{
1646 struct net_device *netdev = adapter->netdev;
1647 u32 temp, temp_state, temp_val;
1648 int rv = 0;
1649
1650 temp = QLCRD32(adapter, CRB_TEMP_STATE);
1651
1652 temp_state = qlcnic_get_temp_state(temp);
1653 temp_val = qlcnic_get_temp_val(temp);
1654
1655 if (temp_state == QLCNIC_TEMP_PANIC) {
1656 dev_err(&netdev->dev,
1657 "Device temperature %d degrees C exceeds"
1658 " maximum allowed. Hardware has been shut down.\n",
1659 temp_val);
1660 rv = 1;
1661 } else if (temp_state == QLCNIC_TEMP_WARN) {
1662 if (adapter->temp == QLCNIC_TEMP_NORMAL) {
1663 dev_err(&netdev->dev,
1664 "Device temperature %d degrees C "
1665 "exceeds operating range."
1666 " Immediate action needed.\n",
1667 temp_val);
1668 }
1669 } else {
1670 if (adapter->temp == QLCNIC_TEMP_WARN) {
1671 dev_info(&netdev->dev,
1672 "Device temperature is now %d degrees C"
1673 " in normal range.\n", temp_val);
1674 }
1675 }
1676 adapter->temp = temp_state;
1677 return rv;
1678}
1679
1680void qlcnic_advert_link_change(struct qlcnic_adapter *adapter, int linkup)
1681{
1682 struct net_device *netdev = adapter->netdev;
1683
1684 if (adapter->ahw.linkup && !linkup) {
1685 dev_info(&netdev->dev, "NIC Link is down\n");
1686 adapter->ahw.linkup = 0;
1687 if (netif_running(netdev)) {
1688 netif_carrier_off(netdev);
1689 netif_stop_queue(netdev);
1690 }
1691 } else if (!adapter->ahw.linkup && linkup) {
1692 dev_info(&netdev->dev, "NIC Link is up\n");
1693 adapter->ahw.linkup = 1;
1694 if (netif_running(netdev)) {
1695 netif_carrier_on(netdev);
1696 netif_wake_queue(netdev);
1697 }
1698 }
1699}
1700
1701static void qlcnic_tx_timeout(struct net_device *netdev)
1702{
1703 struct qlcnic_adapter *adapter = netdev_priv(netdev);
1704
1705 if (test_bit(__QLCNIC_RESETTING, &adapter->state))
1706 return;
1707
1708 dev_err(&netdev->dev, "transmit timeout, resetting.\n");
1709 schedule_work(&adapter->tx_timeout_task);
1710}
1711
1712static void qlcnic_tx_timeout_task(struct work_struct *work)
1713{
1714 struct qlcnic_adapter *adapter =
1715 container_of(work, struct qlcnic_adapter, tx_timeout_task);
1716
1717 if (!netif_running(adapter->netdev))
1718 return;
1719
1720 if (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state))
1721 return;
1722
1723 if (++adapter->tx_timeo_cnt >= QLCNIC_MAX_TX_TIMEOUTS)
1724 goto request_reset;
1725
1726 clear_bit(__QLCNIC_RESETTING, &adapter->state);
1727 if (!qlcnic_reset_context(adapter)) {
1728 adapter->netdev->trans_start = jiffies;
1729 return;
1730
1731 /* context reset failed, fall through for fw reset */
1732 }
1733
1734request_reset:
1735 adapter->need_fw_reset = 1;
1736 clear_bit(__QLCNIC_RESETTING, &adapter->state);
1737}
1738
1739static struct net_device_stats *qlcnic_get_stats(struct net_device *netdev)
1740{
1741 struct qlcnic_adapter *adapter = netdev_priv(netdev);
1742 struct net_device_stats *stats = &netdev->stats;
1743
1744 memset(stats, 0, sizeof(*stats));
1745
1746 stats->rx_packets = adapter->stats.rx_pkts + adapter->stats.lro_pkts;
1747 stats->tx_packets = adapter->stats.xmitfinished;
1748 stats->rx_bytes = adapter->stats.rxbytes;
1749 stats->tx_bytes = adapter->stats.txbytes;
1750 stats->rx_dropped = adapter->stats.rxdropped;
1751 stats->tx_dropped = adapter->stats.txdropped;
1752
1753 return stats;
1754}
1755
1756static irqreturn_t qlcnic_clear_legacy_intr(struct qlcnic_adapter *adapter)
1757{
1758 u32 status;
1759
1760 status = readl(adapter->isr_int_vec);
1761
1762 if (!(status & adapter->int_vec_bit))
1763 return IRQ_NONE;
1764
1765 /* check interrupt state machine, to be sure */
1766 status = readl(adapter->crb_int_state_reg);
1767 if (!ISR_LEGACY_INT_TRIGGERED(status))
1768 return IRQ_NONE;
1769
1770 writel(0xffffffff, adapter->tgt_status_reg);
1771 /* read twice to ensure write is flushed */
1772 readl(adapter->isr_int_vec);
1773 readl(adapter->isr_int_vec);
1774
1775 return IRQ_HANDLED;
1776}
1777
1778static irqreturn_t qlcnic_tmp_intr(int irq, void *data)
1779{
1780 struct qlcnic_host_sds_ring *sds_ring = data;
1781 struct qlcnic_adapter *adapter = sds_ring->adapter;
1782
1783 if (adapter->flags & QLCNIC_MSIX_ENABLED)
1784 goto done;
1785 else if (adapter->flags & QLCNIC_MSI_ENABLED) {
1786 writel(0xffffffff, adapter->tgt_status_reg);
1787 goto done;
1788 }
1789
1790 if (qlcnic_clear_legacy_intr(adapter) == IRQ_NONE)
1791 return IRQ_NONE;
1792
1793done:
1794 adapter->diag_cnt++;
1795 qlcnic_enable_int(sds_ring);
1796 return IRQ_HANDLED;
1797}
1798
1799static irqreturn_t qlcnic_intr(int irq, void *data)
1800{
1801 struct qlcnic_host_sds_ring *sds_ring = data;
1802 struct qlcnic_adapter *adapter = sds_ring->adapter;
1803
1804 if (qlcnic_clear_legacy_intr(adapter) == IRQ_NONE)
1805 return IRQ_NONE;
1806
1807 napi_schedule(&sds_ring->napi);
1808
1809 return IRQ_HANDLED;
1810}
1811
1812static irqreturn_t qlcnic_msi_intr(int irq, void *data)
1813{
1814 struct qlcnic_host_sds_ring *sds_ring = data;
1815 struct qlcnic_adapter *adapter = sds_ring->adapter;
1816
1817 /* clear interrupt */
1818 writel(0xffffffff, adapter->tgt_status_reg);
1819
1820 napi_schedule(&sds_ring->napi);
1821 return IRQ_HANDLED;
1822}
1823
1824static irqreturn_t qlcnic_msix_intr(int irq, void *data)
1825{
1826 struct qlcnic_host_sds_ring *sds_ring = data;
1827
1828 napi_schedule(&sds_ring->napi);
1829 return IRQ_HANDLED;
1830}
1831
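/*
 * Editor's note: the four handlers above (legacy INTx, diagnostic, MSI and
 * MSI-X) are alternatives selected at setup time from adapter->flags; the
 * real request_irq() call lives elsewhere in this file and is not part of
 * this hunk.  The sketch below only illustrates how such a selection is
 * typically wired up.  The helper name is hypothetical and the
 * sds_ring->irq field is assumed from context.
 */
#if 0	/* illustrative sketch, not part of the driver source */
static int qlcnic_example_request_irq(struct qlcnic_adapter *adapter,
			struct qlcnic_host_sds_ring *sds_ring)
{
	irq_handler_t handler;
	unsigned long flags = 0;

	if (adapter->flags & QLCNIC_MSIX_ENABLED) {
		handler = qlcnic_msix_intr;
	} else if (adapter->flags & QLCNIC_MSI_ENABLED) {
		handler = qlcnic_msi_intr;
	} else {
		/* legacy INTx lines may be shared with other devices */
		handler = qlcnic_intr;
		flags = IRQF_SHARED;
	}

	/* one vector per SDS ring; the ring is handed back to the handler */
	return request_irq(sds_ring->irq, handler, flags,
			adapter->netdev->name, sds_ring);
}
#endif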
1832static int qlcnic_process_cmd_ring(struct qlcnic_adapter *adapter)
1833{
1834 u32 sw_consumer, hw_consumer;
1835 int count = 0, i;
1836 struct qlcnic_cmd_buffer *buffer;
1837 struct pci_dev *pdev = adapter->pdev;
1838 struct net_device *netdev = adapter->netdev;
1839 struct qlcnic_skb_frag *frag;
1840 int done;
1841 struct qlcnic_host_tx_ring *tx_ring = adapter->tx_ring;
1842
1843 if (!spin_trylock(&adapter->tx_clean_lock))
1844 return 1;
1845
1846 sw_consumer = tx_ring->sw_consumer;
1847 hw_consumer = le32_to_cpu(*(tx_ring->hw_consumer));
1848
1849 while (sw_consumer != hw_consumer) {
1850 buffer = &tx_ring->cmd_buf_arr[sw_consumer];
1851 if (buffer->skb) {
1852 frag = &buffer->frag_array[0];
1853 pci_unmap_single(pdev, frag->dma, frag->length,
1854 PCI_DMA_TODEVICE);
1855 frag->dma = 0ULL;
1856 for (i = 1; i < buffer->frag_count; i++) {
1857 frag++;
1858 pci_unmap_page(pdev, frag->dma, frag->length,
1859 PCI_DMA_TODEVICE);
1860 frag->dma = 0ULL;
1861 }
1862
1863 adapter->stats.xmitfinished++;
1864 dev_kfree_skb_any(buffer->skb);
1865 buffer->skb = NULL;
1866 }
1867
1868 sw_consumer = get_next_index(sw_consumer, tx_ring->num_desc);
1869 if (++count >= MAX_STATUS_HANDLE)
1870 break;
1871 }
1872
1873 if (count && netif_running(netdev)) {
1874 tx_ring->sw_consumer = sw_consumer;
1875
1876 smp_mb();
1877
1878 if (netif_queue_stopped(netdev) && netif_carrier_ok(netdev)) {
1879 __netif_tx_lock(tx_ring->txq, smp_processor_id());
1880 if (qlcnic_tx_avail(tx_ring) > TX_STOP_THRESH) {
1881 netif_wake_queue(netdev);
1882 adapter->tx_timeo_cnt = 0;
1883 }
1884 __netif_tx_unlock(tx_ring->txq);
1885 }
1886 }
1887 /*
1888	 * If everything is freed up to the consumer, check whether the ring is
1889	 * full. If the ring is full, check whether more needs to be freed and
1890	 * schedule the callback again.
1891 *
1892 * This happens when there are 2 CPUs. One could be freeing and the
1893 * other filling it. If the ring is full when we get out of here and
1894 * the card has already interrupted the host then the host can miss the
1895 * interrupt.
1896 *
1897 * There is still a possible race condition and the host could miss an
1898 * interrupt. The card has to take care of this.
1899 */
1900 hw_consumer = le32_to_cpu(*(tx_ring->hw_consumer));
1901 done = (sw_consumer == hw_consumer);
1902 spin_unlock(&adapter->tx_clean_lock);
1903
1904 return done;
1905}
1906
1907static int qlcnic_poll(struct napi_struct *napi, int budget)
1908{
1909 struct qlcnic_host_sds_ring *sds_ring =
1910 container_of(napi, struct qlcnic_host_sds_ring, napi);
1911
1912 struct qlcnic_adapter *adapter = sds_ring->adapter;
1913
1914 int tx_complete;
1915 int work_done;
1916
1917 tx_complete = qlcnic_process_cmd_ring(adapter);
1918
1919 work_done = qlcnic_process_rcv_ring(sds_ring, budget);
1920
1921 if ((work_done < budget) && tx_complete) {
1922 napi_complete(&sds_ring->napi);
1923 if (test_bit(__QLCNIC_DEV_UP, &adapter->state))
1924 qlcnic_enable_int(sds_ring);
1925 }
1926
1927 return work_done;
1928}
1929
1930#ifdef CONFIG_NET_POLL_CONTROLLER
1931static void qlcnic_poll_controller(struct net_device *netdev)
1932{
1933 struct qlcnic_adapter *adapter = netdev_priv(netdev);
1934 disable_irq(adapter->irq);
1935 qlcnic_intr(adapter->irq, adapter);
1936 enable_irq(adapter->irq);
1937}
1938#endif
1939
1940static void
1941qlcnic_set_drv_state(struct qlcnic_adapter *adapter, int state)
1942{
1943 u32 val;
1944
1945 WARN_ON(state != QLCNIC_DEV_NEED_RESET &&
1946 state != QLCNIC_DEV_NEED_QUISCENT);
1947
1948 if (qlcnic_api_lock(adapter))
1949		return;
1950
1951 val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
1952
1953 if (state == QLCNIC_DEV_NEED_RESET)
1954 val |= ((u32)0x1 << (adapter->portnum * 4));
1955 else if (state == QLCNIC_DEV_NEED_QUISCENT)
1956 val |= ((u32)0x1 << ((adapter->portnum * 4) + 1));
1957
1958 QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val);
1959
1960 qlcnic_api_unlock(adapter);
1961}
1962
1963static int
1964qlcnic_clr_drv_state(struct qlcnic_adapter *adapter)
1965{
1966 u32 val;
1967
1968 if (qlcnic_api_lock(adapter))
1969 return -EBUSY;
1970
1971 val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
1972 val &= ~((u32)0x3 << (adapter->portnum * 4));
1973 QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val);
1974
1975 qlcnic_api_unlock(adapter);
1976
1977 return 0;
1978}
1979
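/*
 * Editor's note (illustration inferred from the code above and below):
 * QLCNIC_CRB_DRV_STATE packs a 4-bit field per function, with bit 0 of the
 * nibble meaning "acked NEED_RESET" and bit 1 "acked NEED_QUISCENT"; hence
 * the (0x1 << (portnum * 4)), (0x1 << ((portnum * 4) + 1)) and
 * (0x3 << (portnum * 4)) expressions.  Worked example for portnum == 2:
 *
 *	need-reset bit  = 0x1 << 8 = 0x00000100
 *	quiescent bit   = 0x1 << 9 = 0x00000200
 *	clear mask      = 0x3 << 8 = 0x00000300
 *
 * QLCNIC_CRB_DEV_REF_COUNT uses bit 0 of each nibble, so ANDing the
 * register with 0x11111111 shows which functions still hold a reference.
 */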
1980static void
1981qlcnic_clr_all_drv_state(struct qlcnic_adapter *adapter)
1982{
1983 u32 val;
1984
1985 if (qlcnic_api_lock(adapter))
1986 goto err;
1987
1988 val = QLCRD32(adapter, QLCNIC_CRB_DEV_REF_COUNT);
1989 val &= ~((u32)0x1 << (adapter->portnum * 4));
1990 QLCWR32(adapter, QLCNIC_CRB_DEV_REF_COUNT, val);
1991
1992 if (!(val & 0x11111111))
1993 QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_COLD);
1994
1995 val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
1996 val &= ~((u32)0x3 << (adapter->portnum * 4));
1997 QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val);
1998
1999 qlcnic_api_unlock(adapter);
2000err:
2001 adapter->fw_fail_cnt = 0;
2002 clear_bit(__QLCNIC_START_FW, &adapter->state);
2003 clear_bit(__QLCNIC_RESETTING, &adapter->state);
2004}
2005
2006static int
2007qlcnic_check_drv_state(struct qlcnic_adapter *adapter)
2008{
2009 int act, state;
2010
2011 state = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
2012 act = QLCRD32(adapter, QLCNIC_CRB_DEV_REF_COUNT);
2013
2014 if (((state & 0x11111111) == (act & 0x11111111)) ||
2015 ((act & 0x11111111) == ((state >> 1) & 0x11111111)))
2016 return 0;
2017 else
2018 return 1;
2019}
2020
2021static int
2022qlcnic_can_start_firmware(struct qlcnic_adapter *adapter)
2023{
2024 u32 val, prev_state;
2025 int cnt = 0;
2026 int portnum = adapter->portnum;
2027
2028 if (qlcnic_api_lock(adapter))
2029 return -1;
2030
2031 val = QLCRD32(adapter, QLCNIC_CRB_DEV_REF_COUNT);
2032 if (!(val & ((int)0x1 << (portnum * 4)))) {
2033 val |= ((u32)0x1 << (portnum * 4));
2034 QLCWR32(adapter, QLCNIC_CRB_DEV_REF_COUNT, val);
2035 } else if (test_and_clear_bit(__QLCNIC_START_FW, &adapter->state)) {
2036 goto start_fw;
2037 }
2038
2039 prev_state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
2040
2041 switch (prev_state) {
2042 case QLCNIC_DEV_COLD:
2043start_fw:
2044 QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_INITALIZING);
2045 qlcnic_api_unlock(adapter);
2046 return 1;
2047
2048 case QLCNIC_DEV_READY:
2049 qlcnic_api_unlock(adapter);
2050 return 0;
2051
2052 case QLCNIC_DEV_NEED_RESET:
2053 val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
2054 val |= ((u32)0x1 << (portnum * 4));
2055 QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val);
2056 break;
2057
2058 case QLCNIC_DEV_NEED_QUISCENT:
2059 val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
2060 val |= ((u32)0x1 << ((portnum * 4) + 1));
2061 QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val);
2062 break;
2063
2064 case QLCNIC_DEV_FAILED:
2065 qlcnic_api_unlock(adapter);
2066 return -1;
2067 }
2068
2069 qlcnic_api_unlock(adapter);
2070 msleep(1000);
2071 while ((QLCRD32(adapter, QLCNIC_CRB_DEV_STATE) != QLCNIC_DEV_READY) &&
2072 ++cnt < 20)
2073 msleep(1000);
2074
2075 if (cnt >= 20)
2076 return -1;
2077
2078 if (qlcnic_api_lock(adapter))
2079 return -1;
2080
2081 val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
2082 val &= ~((u32)0x3 << (portnum * 4));
2083 QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val);
2084
2085 qlcnic_api_unlock(adapter);
2086
2087 return 0;
2088}
2089
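/*
 * Editor's note: a rough summary of the device bring-up handshake used by
 * qlcnic_can_start_firmware() above and qlcnic_fwinit_work() below, as read
 * from this file (the transition back to READY happens inside
 * qlcnic_start_firmware(), which is not part of this hunk):
 *
 *	COLD  -> INITALIZING -> READY
 *	READY -> NEED_RESET/NEED_QUISCENT -> INITALIZING -> READY
 *	any   -> FAILED (give up)
 *
 * Only the function that claims __QLCNIC_START_FW restarts the firmware;
 * the others acknowledge in DRV_STATE and poll DEV_STATE for READY for up
 * to about 20 seconds before clearing their own bits.
 */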
2090static void
2091qlcnic_fwinit_work(struct work_struct *work)
2092{
2093 struct qlcnic_adapter *adapter = container_of(work,
2094 struct qlcnic_adapter, fw_work.work);
2095 int dev_state;
2096
2097 if (++adapter->fw_wait_cnt > FW_POLL_THRESH)
2098 goto err_ret;
2099
2100 if (test_bit(__QLCNIC_START_FW, &adapter->state)) {
2101
2102 if (qlcnic_check_drv_state(adapter)) {
2103 qlcnic_schedule_work(adapter,
2104 qlcnic_fwinit_work, FW_POLL_DELAY);
2105 return;
2106 }
2107
2108 if (!qlcnic_start_firmware(adapter)) {
2109 qlcnic_schedule_work(adapter, qlcnic_attach_work, 0);
2110 return;
2111 }
2112
2113 goto err_ret;
2114 }
2115
2116 dev_state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
2117 switch (dev_state) {
2118 case QLCNIC_DEV_READY:
2119 if (!qlcnic_start_firmware(adapter)) {
2120 qlcnic_schedule_work(adapter, qlcnic_attach_work, 0);
2121 return;
2122 }
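		/* fall through to the FAILED case if firmware start failed */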
2123 case QLCNIC_DEV_FAILED:
2124 break;
2125
2126 default:
2127 qlcnic_schedule_work(adapter,
2128 qlcnic_fwinit_work, 2 * FW_POLL_DELAY);
2129 return;
2130 }
2131
2132err_ret:
2133 qlcnic_clr_all_drv_state(adapter);
2134}
2135
2136static void
2137qlcnic_detach_work(struct work_struct *work)
2138{
2139 struct qlcnic_adapter *adapter = container_of(work,
2140 struct qlcnic_adapter, fw_work.work);
2141 struct net_device *netdev = adapter->netdev;
2142 u32 status;
2143
2144 netif_device_detach(netdev);
2145
2146 qlcnic_down(adapter, netdev);
2147
2148 rtnl_lock();
2149 qlcnic_detach(adapter);
2150 rtnl_unlock();
2151
2152 status = QLCRD32(adapter, QLCNIC_PEG_HALT_STATUS1);
2153
2154 if (status & QLCNIC_RCODE_FATAL_ERROR)
2155 goto err_ret;
2156
2157 if (adapter->temp == QLCNIC_TEMP_PANIC)
2158 goto err_ret;
2159
2160 qlcnic_set_drv_state(adapter, adapter->dev_state);
2161
2162 adapter->fw_wait_cnt = 0;
2163
2164 qlcnic_schedule_work(adapter, qlcnic_fwinit_work, FW_POLL_DELAY);
2165
2166 return;
2167
2168err_ret:
2169 qlcnic_clr_all_drv_state(adapter);
2170
2171}
2172
2173static void
2174qlcnic_dev_request_reset(struct qlcnic_adapter *adapter)
2175{
2176 u32 state;
2177
2178 if (qlcnic_api_lock(adapter))
2179 return;
2180
2181 state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
2182
2183 if (state != QLCNIC_DEV_INITALIZING && state != QLCNIC_DEV_NEED_RESET) {
2184 QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_NEED_RESET);
2185 set_bit(__QLCNIC_START_FW, &adapter->state);
2186 }
2187
2188 qlcnic_api_unlock(adapter);
2189}
2190
2191static void
2192qlcnic_schedule_work(struct qlcnic_adapter *adapter,
2193 work_func_t func, int delay)
2194{
2195 INIT_DELAYED_WORK(&adapter->fw_work, func);
2196 schedule_delayed_work(&adapter->fw_work, round_jiffies_relative(delay));
2197}
2198
2199static void
2200qlcnic_cancel_fw_work(struct qlcnic_adapter *adapter)
2201{
2202 while (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state))
2203 msleep(10);
2204
2205 cancel_delayed_work_sync(&adapter->fw_work);
2206}
2207
2208static void
2209qlcnic_attach_work(struct work_struct *work)
2210{
2211 struct qlcnic_adapter *adapter = container_of(work,
2212 struct qlcnic_adapter, fw_work.work);
2213 struct net_device *netdev = adapter->netdev;
2214 int err;
2215
2216 if (netif_running(netdev)) {
2217 err = qlcnic_attach(adapter);
2218 if (err)
2219 goto done;
2220
2221 err = qlcnic_up(adapter, netdev);
2222 if (err) {
2223 qlcnic_detach(adapter);
2224 goto done;
2225 }
2226
2227 qlcnic_config_indev_addr(netdev, NETDEV_UP);
2228 }
2229
2230 netif_device_attach(netdev);
2231
2232done:
2233 adapter->fw_fail_cnt = 0;
2234 clear_bit(__QLCNIC_RESETTING, &adapter->state);
2235
2236 if (!qlcnic_clr_drv_state(adapter))
2237 qlcnic_schedule_work(adapter, qlcnic_fw_poll_work,
2238 FW_POLL_DELAY);
2239}
2240
2241static int
2242qlcnic_check_health(struct qlcnic_adapter *adapter)
2243{
2244 u32 state = 0, heartbit;
2245 struct net_device *netdev = adapter->netdev;
2246
2247 if (qlcnic_check_temp(adapter))
2248 goto detach;
2249
2250 if (adapter->need_fw_reset) {
2251 qlcnic_dev_request_reset(adapter);
2252 goto detach;
2253 }
2254
2255 state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
2256 if (state == QLCNIC_DEV_NEED_RESET || state == QLCNIC_DEV_NEED_QUISCENT)
2257 adapter->need_fw_reset = 1;
2258
2259 heartbit = QLCRD32(adapter, QLCNIC_PEG_ALIVE_COUNTER);
2260 if (heartbit != adapter->heartbit) {
2261 adapter->heartbit = heartbit;
2262 adapter->fw_fail_cnt = 0;
2263 if (adapter->need_fw_reset)
2264 goto detach;
2265 return 0;
2266 }
2267
2268 if (++adapter->fw_fail_cnt < FW_FAIL_THRESH)
2269 return 0;
2270
2271 qlcnic_dev_request_reset(adapter);
2272
2273 clear_bit(__QLCNIC_FW_ATTACHED, &adapter->state);
2274
2275 dev_info(&netdev->dev, "firmware hang detected\n");
2276
2277detach:
2278 adapter->dev_state = (state == QLCNIC_DEV_NEED_QUISCENT) ? state :
2279 QLCNIC_DEV_NEED_RESET;
2280
2281 if ((auto_fw_reset == AUTO_FW_RESET_ENABLED) &&
2282 !test_and_set_bit(__QLCNIC_RESETTING, &adapter->state))
2283 qlcnic_schedule_work(adapter, qlcnic_detach_work, 0);
2284
2285 return 1;
2286}
2287
2288static void
2289qlcnic_fw_poll_work(struct work_struct *work)
2290{
2291 struct qlcnic_adapter *adapter = container_of(work,
2292 struct qlcnic_adapter, fw_work.work);
2293
2294 if (test_bit(__QLCNIC_RESETTING, &adapter->state))
2295 goto reschedule;
2296
2297
2298 if (qlcnic_check_health(adapter))
2299 return;
2300
2301reschedule:
2302 qlcnic_schedule_work(adapter, qlcnic_fw_poll_work, FW_POLL_DELAY);
2303}
2304
2305static ssize_t
2306qlcnic_store_bridged_mode(struct device *dev,
2307 struct device_attribute *attr, const char *buf, size_t len)
2308{
2309 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
2310 unsigned long new;
2311 int ret = -EINVAL;
2312
2313 if (!(adapter->capabilities & QLCNIC_FW_CAPABILITY_BDG))
2314 goto err_out;
2315
2316 if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
2317 goto err_out;
2318
2319 if (strict_strtoul(buf, 2, &new))
2320 goto err_out;
2321
2322 if (!qlcnic_config_bridged_mode(adapter, !!new))
2323 ret = len;
2324
2325err_out:
2326 return ret;
2327}
2328
2329static ssize_t
2330qlcnic_show_bridged_mode(struct device *dev,
2331 struct device_attribute *attr, char *buf)
2332{
2333 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
2334 int bridged_mode = 0;
2335
2336 if (adapter->capabilities & QLCNIC_FW_CAPABILITY_BDG)
2337 bridged_mode = !!(adapter->flags & QLCNIC_BRIDGE_ENABLED);
2338
2339 return sprintf(buf, "%d\n", bridged_mode);
2340}
2341
2342static struct device_attribute dev_attr_bridged_mode = {
2343 .attr = {.name = "bridged_mode", .mode = (S_IRUGO | S_IWUSR)},
2344 .show = qlcnic_show_bridged_mode,
2345 .store = qlcnic_store_bridged_mode,
2346};
2347
2348static ssize_t
2349qlcnic_store_diag_mode(struct device *dev,
2350 struct device_attribute *attr, const char *buf, size_t len)
2351{
2352 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
2353 unsigned long new;
2354
2355 if (strict_strtoul(buf, 2, &new))
2356 return -EINVAL;
2357
2358 if (!!new != !!(adapter->flags & QLCNIC_DIAG_ENABLED))
2359 adapter->flags ^= QLCNIC_DIAG_ENABLED;
2360
2361 return len;
2362}
2363
2364static ssize_t
2365qlcnic_show_diag_mode(struct device *dev,
2366 struct device_attribute *attr, char *buf)
2367{
2368 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
2369
2370 return sprintf(buf, "%d\n",
2371 !!(adapter->flags & QLCNIC_DIAG_ENABLED));
2372}
2373
2374static struct device_attribute dev_attr_diag_mode = {
2375 .attr = {.name = "diag_mode", .mode = (S_IRUGO | S_IWUSR)},
2376 .show = qlcnic_show_diag_mode,
2377 .store = qlcnic_store_diag_mode,
2378};
2379
2380static int
2381qlcnic_sysfs_validate_crb(struct qlcnic_adapter *adapter,
2382 loff_t offset, size_t size)
2383{
2384 if (!(adapter->flags & QLCNIC_DIAG_ENABLED))
2385 return -EIO;
2386
2387 if ((size != 4) || (offset & 0x3))
2388 return -EINVAL;
2389
2390 if (offset < QLCNIC_PCI_CRBSPACE)
2391 return -EINVAL;
2392
2393 return 0;
2394}
2395
2396static ssize_t
2397qlcnic_sysfs_read_crb(struct kobject *kobj, struct bin_attribute *attr,
2398 char *buf, loff_t offset, size_t size)
2399{
2400 struct device *dev = container_of(kobj, struct device, kobj);
2401 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
2402 u32 data;
2403 int ret;
2404
2405 ret = qlcnic_sysfs_validate_crb(adapter, offset, size);
2406 if (ret != 0)
2407 return ret;
2408
2409 data = QLCRD32(adapter, offset);
2410 memcpy(buf, &data, size);
2411 return size;
2412}
2413
2414static ssize_t
2415qlcnic_sysfs_write_crb(struct kobject *kobj, struct bin_attribute *attr,
2416 char *buf, loff_t offset, size_t size)
2417{
2418 struct device *dev = container_of(kobj, struct device, kobj);
2419 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
2420 u32 data;
2421 int ret;
2422
2423 ret = qlcnic_sysfs_validate_crb(adapter, offset, size);
2424 if (ret != 0)
2425 return ret;
2426
2427 memcpy(&data, buf, size);
2428 QLCWR32(adapter, offset, data);
2429 return size;
2430}
2431
2432static int
2433qlcnic_sysfs_validate_mem(struct qlcnic_adapter *adapter,
2434 loff_t offset, size_t size)
2435{
2436 if (!(adapter->flags & QLCNIC_DIAG_ENABLED))
2437 return -EIO;
2438
2439 if ((size != 8) || (offset & 0x7))
2440 return -EIO;
2441
2442 return 0;
2443}
2444
2445static ssize_t
2446qlcnic_sysfs_read_mem(struct kobject *kobj, struct bin_attribute *attr,
2447 char *buf, loff_t offset, size_t size)
2448{
2449 struct device *dev = container_of(kobj, struct device, kobj);
2450 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
2451 u64 data;
2452 int ret;
2453
2454 ret = qlcnic_sysfs_validate_mem(adapter, offset, size);
2455 if (ret != 0)
2456 return ret;
2457
2458 if (qlcnic_pci_mem_read_2M(adapter, offset, &data))
2459 return -EIO;
2460
2461 memcpy(buf, &data, size);
2462
2463 return size;
2464}
2465
2466static ssize_t
2467qlcnic_sysfs_write_mem(struct kobject *kobj, struct bin_attribute *attr,
2468 char *buf, loff_t offset, size_t size)
2469{
2470 struct device *dev = container_of(kobj, struct device, kobj);
2471 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
2472 u64 data;
2473 int ret;
2474
2475 ret = qlcnic_sysfs_validate_mem(adapter, offset, size);
2476 if (ret != 0)
2477 return ret;
2478
2479 memcpy(&data, buf, size);
2480
2481 if (qlcnic_pci_mem_write_2M(adapter, offset, data))
2482 return -EIO;
2483
2484 return size;
2485}
2486
2487
2488static struct bin_attribute bin_attr_crb = {
2489 .attr = {.name = "crb", .mode = (S_IRUGO | S_IWUSR)},
2490 .size = 0,
2491 .read = qlcnic_sysfs_read_crb,
2492 .write = qlcnic_sysfs_write_crb,
2493};
2494
2495static struct bin_attribute bin_attr_mem = {
2496 .attr = {.name = "mem", .mode = (S_IRUGO | S_IWUSR)},
2497 .size = 0,
2498 .read = qlcnic_sysfs_read_mem,
2499 .write = qlcnic_sysfs_write_mem,
2500};
2501
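/*
 * Editor's note: the "crb" and "mem" binary attributes above are created on
 * the PCI device (see qlcnic_create_diag_entries() below), are only usable
 * while diag_mode is set, and enforce the size/alignment checks in
 * qlcnic_sysfs_validate_crb()/_mem().  A minimal user-space sketch of a
 * 4-byte CRB read follows; the sysfs path and the register offset are
 * placeholders only.
 */
#if 0	/* illustrative user-space sketch, not part of the driver source */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/types.h>
#include <unistd.h>

int main(void)
{
	/* hypothetical PCI address; offset must be a 4-byte aligned CRB offset */
	const char *path = "/sys/bus/pci/devices/0000:03:00.0/crb";
	off_t offset = 0x06000000;
	uint32_t val;
	int fd = open(path, O_RDONLY);

	if (fd < 0)
		return 1;
	if (pread(fd, &val, sizeof(val), offset) != (ssize_t)sizeof(val)) {
		close(fd);
		return 1;
	}
	printf("CRB[0x%llx] = 0x%08x\n", (unsigned long long)offset, val);
	close(fd);
	return 0;
}
#endif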
2502static void
2503qlcnic_create_sysfs_entries(struct qlcnic_adapter *adapter)
2504{
2505 struct device *dev = &adapter->pdev->dev;
2506
2507 if (adapter->capabilities & QLCNIC_FW_CAPABILITY_BDG)
2508 if (device_create_file(dev, &dev_attr_bridged_mode))
2509 dev_warn(dev,
2510 "failed to create bridged_mode sysfs entry\n");
2511}
2512
2513static void
2514qlcnic_remove_sysfs_entries(struct qlcnic_adapter *adapter)
2515{
2516 struct device *dev = &adapter->pdev->dev;
2517
2518 if (adapter->capabilities & QLCNIC_FW_CAPABILITY_BDG)
2519 device_remove_file(dev, &dev_attr_bridged_mode);
2520}
2521
2522static void
2523qlcnic_create_diag_entries(struct qlcnic_adapter *adapter)
2524{
2525 struct device *dev = &adapter->pdev->dev;
2526
2527 if (device_create_file(dev, &dev_attr_diag_mode))
2528 dev_info(dev, "failed to create diag_mode sysfs entry\n");
2529 if (device_create_bin_file(dev, &bin_attr_crb))
2530 dev_info(dev, "failed to create crb sysfs entry\n");
2531 if (device_create_bin_file(dev, &bin_attr_mem))
2532 dev_info(dev, "failed to create mem sysfs entry\n");
2533}
2534
2535
2536static void
2537qlcnic_remove_diag_entries(struct qlcnic_adapter *adapter)
2538{
2539 struct device *dev = &adapter->pdev->dev;
2540
2541 device_remove_file(dev, &dev_attr_diag_mode);
2542 device_remove_bin_file(dev, &bin_attr_crb);
2543 device_remove_bin_file(dev, &bin_attr_mem);
2544}
2545
2546#ifdef CONFIG_INET
2547
2548#define is_qlcnic_netdev(dev) (dev->netdev_ops == &qlcnic_netdev_ops)
2549
2550static int
2551qlcnic_destip_supported(struct qlcnic_adapter *adapter)
2552{
2553 if (adapter->ahw.cut_through)
2554 return 0;
2555
2556 return 1;
2557}
2558
2559static void
2560qlcnic_config_indev_addr(struct net_device *dev, unsigned long event)
2561{
2562 struct in_device *indev;
2563 struct qlcnic_adapter *adapter = netdev_priv(dev);
2564
2565 if (!qlcnic_destip_supported(adapter))
2566 return;
2567
2568 indev = in_dev_get(dev);
2569 if (!indev)
2570 return;
2571
2572 for_ifa(indev) {
2573 switch (event) {
2574 case NETDEV_UP:
2575 qlcnic_config_ipaddr(adapter,
2576 ifa->ifa_address, QLCNIC_IP_UP);
2577 break;
2578 case NETDEV_DOWN:
2579 qlcnic_config_ipaddr(adapter,
2580 ifa->ifa_address, QLCNIC_IP_DOWN);
2581 break;
2582 default:
2583 break;
2584 }
2585 } endfor_ifa(indev);
2586
2587 in_dev_put(indev);
2588 return;
2589}
2590
2591static int qlcnic_netdev_event(struct notifier_block *this,
2592 unsigned long event, void *ptr)
2593{
2594 struct qlcnic_adapter *adapter;
2595 struct net_device *dev = (struct net_device *)ptr;
2596
2597recheck:
2598 if (dev == NULL)
2599 goto done;
2600
2601 if (dev->priv_flags & IFF_802_1Q_VLAN) {
2602 dev = vlan_dev_real_dev(dev);
2603 goto recheck;
2604 }
2605
2606 if (!is_qlcnic_netdev(dev))
2607 goto done;
2608
2609 adapter = netdev_priv(dev);
2610
2611 if (!adapter)
2612 goto done;
2613
2614 if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
2615 goto done;
2616
2617 qlcnic_config_indev_addr(dev, event);
2618done:
2619 return NOTIFY_DONE;
2620}
2621
2622static int
2623qlcnic_inetaddr_event(struct notifier_block *this,
2624 unsigned long event, void *ptr)
2625{
2626 struct qlcnic_adapter *adapter;
2627 struct net_device *dev;
2628
2629 struct in_ifaddr *ifa = (struct in_ifaddr *)ptr;
2630
2631 dev = ifa->ifa_dev ? ifa->ifa_dev->dev : NULL;
2632
2633recheck:
2634 if (dev == NULL || !netif_running(dev))
2635 goto done;
2636
2637 if (dev->priv_flags & IFF_802_1Q_VLAN) {
2638 dev = vlan_dev_real_dev(dev);
2639 goto recheck;
2640 }
2641
2642 if (!is_qlcnic_netdev(dev))
2643 goto done;
2644
2645 adapter = netdev_priv(dev);
2646
2647 if (!adapter || !qlcnic_destip_supported(adapter))
2648 goto done;
2649
2650 if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
2651 goto done;
2652
2653 switch (event) {
2654 case NETDEV_UP:
2655 qlcnic_config_ipaddr(adapter, ifa->ifa_address, QLCNIC_IP_UP);
2656 break;
2657 case NETDEV_DOWN:
2658 qlcnic_config_ipaddr(adapter, ifa->ifa_address, QLCNIC_IP_DOWN);
2659 break;
2660 default:
2661 break;
2662 }
2663
2664done:
2665 return NOTIFY_DONE;
2666}
2667
2668static struct notifier_block qlcnic_netdev_cb = {
2669 .notifier_call = qlcnic_netdev_event,
2670};
2671
2672static struct notifier_block qlcnic_inetaddr_cb = {
2673 .notifier_call = qlcnic_inetaddr_event,
2674};
2675#else
2676static void
2677qlcnic_config_indev_addr(struct net_device *dev, unsigned long event)
2678{ }
2679#endif
2680
2681static struct pci_driver qlcnic_driver = {
2682 .name = qlcnic_driver_name,
2683 .id_table = qlcnic_pci_tbl,
2684 .probe = qlcnic_probe,
2685 .remove = __devexit_p(qlcnic_remove),
2686#ifdef CONFIG_PM
2687 .suspend = qlcnic_suspend,
2688 .resume = qlcnic_resume,
2689#endif
2690 .shutdown = qlcnic_shutdown
2691};
2692
2693static int __init qlcnic_init_module(void)
2694{
2695
2696 printk(KERN_INFO "%s\n", qlcnic_driver_string);
2697
2698#ifdef CONFIG_INET
2699 register_netdevice_notifier(&qlcnic_netdev_cb);
2700 register_inetaddr_notifier(&qlcnic_inetaddr_cb);
2701#endif
2702
2703
2704 return pci_register_driver(&qlcnic_driver);
2705}
2706
2707module_init(qlcnic_init_module);
2708
2709static void __exit qlcnic_exit_module(void)
2710{
2711
2712 pci_unregister_driver(&qlcnic_driver);
2713
2714#ifdef CONFIG_INET
2715 unregister_inetaddr_notifier(&qlcnic_inetaddr_cb);
2716 unregister_netdevice_notifier(&qlcnic_netdev_cb);
2717#endif
2718}
2719
2720module_exit(qlcnic_exit_module);
diff --git a/drivers/net/qlge/qlge.h b/drivers/net/qlge/qlge.h
index 862c1aaf386..9169c4cf413 100644
--- a/drivers/net/qlge/qlge.h
+++ b/drivers/net/qlge/qlge.h
@@ -54,12 +54,8 @@
54#define RX_RING_SHADOW_SPACE (sizeof(u64) + \ 54#define RX_RING_SHADOW_SPACE (sizeof(u64) + \
55 MAX_DB_PAGES_PER_BQ(NUM_SMALL_BUFFERS) * sizeof(u64) + \ 55 MAX_DB_PAGES_PER_BQ(NUM_SMALL_BUFFERS) * sizeof(u64) + \
56 MAX_DB_PAGES_PER_BQ(NUM_LARGE_BUFFERS) * sizeof(u64)) 56 MAX_DB_PAGES_PER_BQ(NUM_LARGE_BUFFERS) * sizeof(u64))
57#define SMALL_BUFFER_SIZE 512
58#define SMALL_BUF_MAP_SIZE (SMALL_BUFFER_SIZE / 2)
59#define LARGE_BUFFER_MAX_SIZE 8192 57#define LARGE_BUFFER_MAX_SIZE 8192
60#define LARGE_BUFFER_MIN_SIZE 2048 58#define LARGE_BUFFER_MIN_SIZE 2048
61#define MAX_SPLIT_SIZE 1023
62#define QLGE_SB_PAD 32
63 59
64#define MAX_CQ 128 60#define MAX_CQ 128
65#define DFLT_COALESCE_WAIT 100 /* 100 usec wait for coalescing */ 61#define DFLT_COALESCE_WAIT 100 /* 100 usec wait for coalescing */
@@ -79,15 +75,43 @@
79#define TX_DESC_PER_OAL 0 75#define TX_DESC_PER_OAL 0
80#endif 76#endif
81 77
78/* Word shifting for converting 64-bit
79 * address to a series of 16-bit words.
80 * This is used for some MPI firmware
81 * mailbox commands.
82 */
83#define LSW(x) ((u16)(x))
84#define MSW(x) ((u16)((u32)(x) >> 16))
85#define LSD(x) ((u32)((u64)(x)))
86#define MSD(x) ((u32)((((u64)(x)) >> 32)))
87
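/* Editorial illustration of the macros above: for
 * x = 0x1122334455667788ULL,
 *
 *	MSD(x) = 0x11223344        LSD(x) = 0x55667788
 *	MSW(LSD(x)) = 0x5566       LSW(LSD(x)) = 0x7788
 *
 * so a 64-bit address can be handed to the MPI firmware as 16-bit words,
 * e.g. LSW(LSD(x)), MSW(LSD(x)), LSW(MSD(x)), MSW(MSD(x)); the exact
 * mailbox ordering is command specific.
 */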
82/* MPI test register definitions. This register 88/* MPI test register definitions. This register
83 * is used for determining alternate NIC function's 89 * is used for determining alternate NIC function's
84 * PCI->func number. 90 * PCI->func number.
85 */ 91 */
86enum { 92enum {
87 MPI_TEST_FUNC_PORT_CFG = 0x1002, 93 MPI_TEST_FUNC_PORT_CFG = 0x1002,
94 MPI_TEST_FUNC_PRB_CTL = 0x100e,
95 MPI_TEST_FUNC_PRB_EN = 0x18a20000,
96 MPI_TEST_FUNC_RST_STS = 0x100a,
97 MPI_TEST_FUNC_RST_FRC = 0x00000003,
98 MPI_TEST_NIC_FUNC_MASK = 0x00000007,
99 MPI_TEST_NIC1_FUNCTION_ENABLE = (1 << 0),
100 MPI_TEST_NIC1_FUNCTION_MASK = 0x0000000e,
88 MPI_TEST_NIC1_FUNC_SHIFT = 1, 101 MPI_TEST_NIC1_FUNC_SHIFT = 1,
102 MPI_TEST_NIC2_FUNCTION_ENABLE = (1 << 4),
103 MPI_TEST_NIC2_FUNCTION_MASK = 0x000000e0,
89 MPI_TEST_NIC2_FUNC_SHIFT = 5, 104 MPI_TEST_NIC2_FUNC_SHIFT = 5,
90 MPI_TEST_NIC_FUNC_MASK = 0x00000007, 105 MPI_TEST_FC1_FUNCTION_ENABLE = (1 << 8),
106 MPI_TEST_FC1_FUNCTION_MASK = 0x00000e00,
107 MPI_TEST_FC1_FUNCTION_SHIFT = 9,
108 MPI_TEST_FC2_FUNCTION_ENABLE = (1 << 12),
109 MPI_TEST_FC2_FUNCTION_MASK = 0x0000e000,
110 MPI_TEST_FC2_FUNCTION_SHIFT = 13,
111
112 MPI_NIC_READ = 0x00000000,
113 MPI_NIC_REG_BLOCK = 0x00020000,
114 MPI_NIC_FUNCTION_SHIFT = 6,
91}; 115};
92 116
93/* 117/*
@@ -468,7 +492,7 @@ enum {
468 MDIO_PORT = 0x00000440, 492 MDIO_PORT = 0x00000440,
469 MDIO_STATUS = 0x00000450, 493 MDIO_STATUS = 0x00000450,
470 494
471 /* XGMAC AUX statistics registers */ 495 XGMAC_REGISTER_END = 0x00000740,
472}; 496};
473 497
474/* 498/*
@@ -509,6 +533,7 @@ enum {
509enum { 533enum {
510 MAC_ADDR_IDX_SHIFT = 4, 534 MAC_ADDR_IDX_SHIFT = 4,
511 MAC_ADDR_TYPE_SHIFT = 16, 535 MAC_ADDR_TYPE_SHIFT = 16,
536 MAC_ADDR_TYPE_COUNT = 10,
512 MAC_ADDR_TYPE_MASK = 0x000f0000, 537 MAC_ADDR_TYPE_MASK = 0x000f0000,
513 MAC_ADDR_TYPE_CAM_MAC = 0x00000000, 538 MAC_ADDR_TYPE_CAM_MAC = 0x00000000,
514 MAC_ADDR_TYPE_MULTI_MAC = 0x00010000, 539 MAC_ADDR_TYPE_MULTI_MAC = 0x00010000,
@@ -526,6 +551,30 @@ enum {
526 MAC_ADDR_MR = (1 << 30), 551 MAC_ADDR_MR = (1 << 30),
527 MAC_ADDR_MW = (1 << 31), 552 MAC_ADDR_MW = (1 << 31),
528 MAX_MULTICAST_ENTRIES = 32, 553 MAX_MULTICAST_ENTRIES = 32,
554
555 /* Entry count and words per entry
556 * for each address type in the filter.
557 */
558 MAC_ADDR_MAX_CAM_ENTRIES = 512,
559 MAC_ADDR_MAX_CAM_WCOUNT = 3,
560 MAC_ADDR_MAX_MULTICAST_ENTRIES = 32,
561 MAC_ADDR_MAX_MULTICAST_WCOUNT = 2,
562 MAC_ADDR_MAX_VLAN_ENTRIES = 4096,
563 MAC_ADDR_MAX_VLAN_WCOUNT = 1,
564 MAC_ADDR_MAX_MCAST_FLTR_ENTRIES = 4096,
565 MAC_ADDR_MAX_MCAST_FLTR_WCOUNT = 1,
566 MAC_ADDR_MAX_FC_MAC_ENTRIES = 4,
567 MAC_ADDR_MAX_FC_MAC_WCOUNT = 2,
568 MAC_ADDR_MAX_MGMT_MAC_ENTRIES = 8,
569 MAC_ADDR_MAX_MGMT_MAC_WCOUNT = 2,
570 MAC_ADDR_MAX_MGMT_VLAN_ENTRIES = 16,
571 MAC_ADDR_MAX_MGMT_VLAN_WCOUNT = 1,
572 MAC_ADDR_MAX_MGMT_V4_ENTRIES = 4,
573 MAC_ADDR_MAX_MGMT_V4_WCOUNT = 1,
574 MAC_ADDR_MAX_MGMT_V6_ENTRIES = 4,
575 MAC_ADDR_MAX_MGMT_V6_WCOUNT = 4,
576 MAC_ADDR_MAX_MGMT_TU_DP_ENTRIES = 4,
577 MAC_ADDR_MAX_MGMT_TU_DP_WCOUNT = 1,
529}; 578};
530 579
531/* 580/*
@@ -596,6 +645,7 @@ enum {
596enum { 645enum {
597 RT_IDX_IDX_SHIFT = 8, 646 RT_IDX_IDX_SHIFT = 8,
598 RT_IDX_TYPE_MASK = 0x000f0000, 647 RT_IDX_TYPE_MASK = 0x000f0000,
648 RT_IDX_TYPE_SHIFT = 16,
599 RT_IDX_TYPE_RT = 0x00000000, 649 RT_IDX_TYPE_RT = 0x00000000,
600 RT_IDX_TYPE_RT_INV = 0x00010000, 650 RT_IDX_TYPE_RT_INV = 0x00010000,
601 RT_IDX_TYPE_NICQ = 0x00020000, 651 RT_IDX_TYPE_NICQ = 0x00020000,
@@ -664,7 +714,89 @@ enum {
664 RT_IDX_UNUSED013 = 13, 714 RT_IDX_UNUSED013 = 13,
665 RT_IDX_UNUSED014 = 14, 715 RT_IDX_UNUSED014 = 14,
666 RT_IDX_PROMISCUOUS_SLOT = 15, 716 RT_IDX_PROMISCUOUS_SLOT = 15,
667 RT_IDX_MAX_SLOTS = 16, 717 RT_IDX_MAX_RT_SLOTS = 8,
718 RT_IDX_MAX_NIC_SLOTS = 16,
719};
720
721/*
722 * Serdes Address Register (XG_SERDES_ADDR) bit definitions.
723 */
724enum {
725 XG_SERDES_ADDR_RDY = (1 << 31),
726 XG_SERDES_ADDR_R = (1 << 30),
727
728 XG_SERDES_ADDR_STS = 0x00001E06,
729 XG_SERDES_ADDR_XFI1_PWR_UP = 0x00000005,
730 XG_SERDES_ADDR_XFI2_PWR_UP = 0x0000000a,
731 XG_SERDES_ADDR_XAUI_PWR_DOWN = 0x00000001,
732
733 /* Serdes coredump definitions. */
734 XG_SERDES_XAUI_AN_START = 0x00000000,
735 XG_SERDES_XAUI_AN_END = 0x00000034,
736 XG_SERDES_XAUI_HSS_PCS_START = 0x00000800,
737 XG_SERDES_XAUI_HSS_PCS_END = 0x0000880,
738 XG_SERDES_XFI_AN_START = 0x00001000,
739 XG_SERDES_XFI_AN_END = 0x00001034,
740 XG_SERDES_XFI_TRAIN_START = 0x10001050,
741 XG_SERDES_XFI_TRAIN_END = 0x1000107C,
742 XG_SERDES_XFI_HSS_PCS_START = 0x00001800,
743 XG_SERDES_XFI_HSS_PCS_END = 0x00001838,
744 XG_SERDES_XFI_HSS_TX_START = 0x00001c00,
745 XG_SERDES_XFI_HSS_TX_END = 0x00001c1f,
746 XG_SERDES_XFI_HSS_RX_START = 0x00001c40,
747 XG_SERDES_XFI_HSS_RX_END = 0x00001c5f,
748 XG_SERDES_XFI_HSS_PLL_START = 0x00001e00,
749 XG_SERDES_XFI_HSS_PLL_END = 0x00001e1f,
750};
751
752/*
753 * NIC Probe Mux Address Register (PRB_MX_ADDR) bit definitions.
754 */
755enum {
756 PRB_MX_ADDR_ARE = (1 << 16),
757 PRB_MX_ADDR_UP = (1 << 15),
758 PRB_MX_ADDR_SWP = (1 << 14),
759
760 /* Module select values. */
761 PRB_MX_ADDR_MAX_MODS = 21,
762 PRB_MX_ADDR_MOD_SEL_SHIFT = 9,
763 PRB_MX_ADDR_MOD_SEL_TBD = 0,
764 PRB_MX_ADDR_MOD_SEL_IDE1 = 1,
765 PRB_MX_ADDR_MOD_SEL_IDE2 = 2,
766 PRB_MX_ADDR_MOD_SEL_FRB = 3,
767 PRB_MX_ADDR_MOD_SEL_ODE1 = 4,
768 PRB_MX_ADDR_MOD_SEL_ODE2 = 5,
769 PRB_MX_ADDR_MOD_SEL_DA1 = 6,
770 PRB_MX_ADDR_MOD_SEL_DA2 = 7,
771 PRB_MX_ADDR_MOD_SEL_IMP1 = 8,
772 PRB_MX_ADDR_MOD_SEL_IMP2 = 9,
773 PRB_MX_ADDR_MOD_SEL_OMP1 = 10,
774 PRB_MX_ADDR_MOD_SEL_OMP2 = 11,
775 PRB_MX_ADDR_MOD_SEL_ORS1 = 12,
776 PRB_MX_ADDR_MOD_SEL_ORS2 = 13,
777 PRB_MX_ADDR_MOD_SEL_REG = 14,
778 PRB_MX_ADDR_MOD_SEL_MAC1 = 16,
779 PRB_MX_ADDR_MOD_SEL_MAC2 = 17,
780 PRB_MX_ADDR_MOD_SEL_VQM1 = 18,
781 PRB_MX_ADDR_MOD_SEL_VQM2 = 19,
782 PRB_MX_ADDR_MOD_SEL_MOP = 20,
783 /* Bit fields indicating which modules
784 * are valid for each clock domain.
785 */
786 PRB_MX_ADDR_VALID_SYS_MOD = 0x000f7ff7,
787 PRB_MX_ADDR_VALID_PCI_MOD = 0x000040c1,
788 PRB_MX_ADDR_VALID_XGM_MOD = 0x00037309,
789 PRB_MX_ADDR_VALID_FC_MOD = 0x00003001,
790 PRB_MX_ADDR_VALID_TOTAL = 34,
791
792 /* Clock domain values. */
793 PRB_MX_ADDR_CLOCK_SHIFT = 6,
794 PRB_MX_ADDR_SYS_CLOCK = 0,
795 PRB_MX_ADDR_PCI_CLOCK = 2,
796 PRB_MX_ADDR_FC_CLOCK = 5,
797 PRB_MX_ADDR_XGM_CLOCK = 6,
798
799 PRB_MX_ADDR_MAX_MUX = 64,
668}; 800};
669 801
670/* 802/*
@@ -737,6 +869,21 @@ enum {
737 PRB_MX_DATA = 0xfc, /* Use semaphore */ 869 PRB_MX_DATA = 0xfc, /* Use semaphore */
738}; 870};
739 871
872#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
873#define SMALL_BUFFER_SIZE 256
874#define SMALL_BUF_MAP_SIZE SMALL_BUFFER_SIZE
875#define SPLT_SETTING FSC_DBRST_1024
876#define SPLT_LEN 0
877#define QLGE_SB_PAD 0
878#else
879#define SMALL_BUFFER_SIZE 512
880#define SMALL_BUF_MAP_SIZE (SMALL_BUFFER_SIZE / 2)
881#define SPLT_SETTING FSC_SH
882#define SPLT_LEN (SPLT_HDR_EP | \
883 min(SMALL_BUF_MAP_SIZE, 1023))
884#define QLGE_SB_PAD 32
885#endif
886
740/* 887/*
741 * CAM output format. 888 * CAM output format.
742 */ 889 */
@@ -1421,7 +1568,7 @@ struct nic_stats {
1421 u64 rx_nic_fifo_drop; 1568 u64 rx_nic_fifo_drop;
1422}; 1569};
1423 1570
1424/* Address/Length pairs for the coredump. */ 1571/* Firmware coredump internal register address/length pairs. */
1425enum { 1572enum {
1426 MPI_CORE_REGS_ADDR = 0x00030000, 1573 MPI_CORE_REGS_ADDR = 0x00030000,
1427 MPI_CORE_REGS_CNT = 127, 1574 MPI_CORE_REGS_CNT = 127,
@@ -1476,7 +1623,7 @@ struct mpi_coredump_segment_header {
1476 u8 description[16]; 1623 u8 description[16];
1477}; 1624};
1478 1625
1479/* Reg dump segment numbers. */ 1626/* Firmware coredump header segment numbers. */
1480enum { 1627enum {
1481 CORE_SEG_NUM = 1, 1628 CORE_SEG_NUM = 1,
1482 TEST_LOGIC_SEG_NUM = 2, 1629 TEST_LOGIC_SEG_NUM = 2,
@@ -1527,6 +1674,67 @@ enum {
1527 1674
1528}; 1675};
1529 1676
1677/* There are 64 generic NIC registers. */
1678#define NIC_REGS_DUMP_WORD_COUNT 64
1679/* XGMAC word count. */
1680#define XGMAC_DUMP_WORD_COUNT (XGMAC_REGISTER_END / 4)
1681/* Word counts for the SERDES blocks. */
1682#define XG_SERDES_XAUI_AN_COUNT 14
1683#define XG_SERDES_XAUI_HSS_PCS_COUNT 33
1684#define XG_SERDES_XFI_AN_COUNT 14
1685#define XG_SERDES_XFI_TRAIN_COUNT 12
1686#define XG_SERDES_XFI_HSS_PCS_COUNT 15
1687#define XG_SERDES_XFI_HSS_TX_COUNT 32
1688#define XG_SERDES_XFI_HSS_RX_COUNT 32
1689#define XG_SERDES_XFI_HSS_PLL_COUNT 32
1690
1691/* There are 2 CNA ETS and 8 NIC ETS registers. */
1692#define ETS_REGS_DUMP_WORD_COUNT 10
1693
1694/* Each probe mux entry stores the probe type plus 64 entries
1695 * that are each 64 bits in length. There are a total of
1696 * 34 (PRB_MX_ADDR_VALID_TOTAL) valid probes.
1697 */
1698#define PRB_MX_ADDR_PRB_WORD_COUNT (1 + (PRB_MX_ADDR_MAX_MUX * 2))
1699#define PRB_MX_DUMP_TOT_COUNT (PRB_MX_ADDR_PRB_WORD_COUNT * \
1700 PRB_MX_ADDR_VALID_TOTAL)
1701/* Each routing entry consists of 4 32-bit words.
1702 * They are route type, index, index word, and result.
1703 * There are 2 route blocks with 8 entries each and
1704 * 2 NIC blocks with 16 entries each.
1705 * The total is 48 entries of 4 words each.
1706 */
1707#define RT_IDX_DUMP_ENTRIES 48
1708#define RT_IDX_DUMP_WORDS_PER_ENTRY 4
1709#define RT_IDX_DUMP_TOT_WORDS (RT_IDX_DUMP_ENTRIES * \
1710 RT_IDX_DUMP_WORDS_PER_ENTRY)
1711/* There are 10 address blocks in the filter, each with
1712 * a different entry count and a different word count per entry.
1713 */
1714#define MAC_ADDR_DUMP_ENTRIES \
1715 ((MAC_ADDR_MAX_CAM_ENTRIES * MAC_ADDR_MAX_CAM_WCOUNT) + \
1716 (MAC_ADDR_MAX_MULTICAST_ENTRIES * MAC_ADDR_MAX_MULTICAST_WCOUNT) + \
1717 (MAC_ADDR_MAX_VLAN_ENTRIES * MAC_ADDR_MAX_VLAN_WCOUNT) + \
1718 (MAC_ADDR_MAX_MCAST_FLTR_ENTRIES * MAC_ADDR_MAX_MCAST_FLTR_WCOUNT) + \
1719 (MAC_ADDR_MAX_FC_MAC_ENTRIES * MAC_ADDR_MAX_FC_MAC_WCOUNT) + \
1720 (MAC_ADDR_MAX_MGMT_MAC_ENTRIES * MAC_ADDR_MAX_MGMT_MAC_WCOUNT) + \
1721 (MAC_ADDR_MAX_MGMT_VLAN_ENTRIES * MAC_ADDR_MAX_MGMT_VLAN_WCOUNT) + \
1722 (MAC_ADDR_MAX_MGMT_V4_ENTRIES * MAC_ADDR_MAX_MGMT_V4_WCOUNT) + \
1723 (MAC_ADDR_MAX_MGMT_V6_ENTRIES * MAC_ADDR_MAX_MGMT_V6_WCOUNT) + \
1724 (MAC_ADDR_MAX_MGMT_TU_DP_ENTRIES * MAC_ADDR_MAX_MGMT_TU_DP_WCOUNT))
1725#define MAC_ADDR_DUMP_WORDS_PER_ENTRY 2
1726#define MAC_ADDR_DUMP_TOT_WORDS (MAC_ADDR_DUMP_ENTRIES * \
1727 MAC_ADDR_DUMP_WORDS_PER_ENTRY)
1728/* Maximum of 4 functions whose semaphore registers are
1729 * in the coredump.
1730 */
1731#define MAX_SEMAPHORE_FUNCTIONS 4
1732/* Defines for accessing the MPI shadow registers. */
1733#define RISC_124 0x0003007c
1734#define RISC_127 0x0003007f
1735#define SHADOW_OFFSET 0xb0000000
1736#define SHADOW_REG_SHIFT 20
1737
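/* Editorial illustration: totals implied by the dump-size macros above
 * (computed from the definitions, not taken from a specification):
 *
 *	PRB_MX_ADDR_PRB_WORD_COUNT = 1 + 64 * 2  = 129 words per probe
 *	PRB_MX_DUMP_TOT_COUNT      = 129 * 34    = 4386 words
 *	RT_IDX_DUMP_TOT_WORDS      = 48 * 4      = 192 words
 *	MAC_ADDR_DUMP_ENTRIES      = 9856, so MAC_ADDR_DUMP_TOT_WORDS = 19712
 */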
1530struct ql_nic_misc { 1738struct ql_nic_misc {
1531 u32 rx_ring_count; 1739 u32 rx_ring_count;
1532 u32 tx_ring_count; 1740 u32 tx_ring_count;
@@ -1568,6 +1776,199 @@ struct ql_reg_dump {
1568 u32 ets[8+2]; 1776 u32 ets[8+2];
1569}; 1777};
1570 1778
1779struct ql_mpi_coredump {
1780 /* segment 0 */
1781 struct mpi_coredump_global_header mpi_global_header;
1782
1783 /* segment 1 */
1784 struct mpi_coredump_segment_header core_regs_seg_hdr;
1785 u32 mpi_core_regs[MPI_CORE_REGS_CNT];
1786 u32 mpi_core_sh_regs[MPI_CORE_SH_REGS_CNT];
1787
1788 /* segment 2 */
1789 struct mpi_coredump_segment_header test_logic_regs_seg_hdr;
1790 u32 test_logic_regs[TEST_REGS_CNT];
1791
1792 /* segment 3 */
1793 struct mpi_coredump_segment_header rmii_regs_seg_hdr;
1794 u32 rmii_regs[RMII_REGS_CNT];
1795
1796 /* segment 4 */
1797 struct mpi_coredump_segment_header fcmac1_regs_seg_hdr;
1798 u32 fcmac1_regs[FCMAC_REGS_CNT];
1799
1800 /* segment 5 */
1801 struct mpi_coredump_segment_header fcmac2_regs_seg_hdr;
1802 u32 fcmac2_regs[FCMAC_REGS_CNT];
1803
1804 /* segment 6 */
1805 struct mpi_coredump_segment_header fc1_mbx_regs_seg_hdr;
1806 u32 fc1_mbx_regs[FC_MBX_REGS_CNT];
1807
1808 /* segment 7 */
1809 struct mpi_coredump_segment_header ide_regs_seg_hdr;
1810 u32 ide_regs[IDE_REGS_CNT];
1811
1812 /* segment 8 */
1813 struct mpi_coredump_segment_header nic1_mbx_regs_seg_hdr;
1814 u32 nic1_mbx_regs[NIC_MBX_REGS_CNT];
1815
1816 /* segment 9 */
1817 struct mpi_coredump_segment_header smbus_regs_seg_hdr;
1818 u32 smbus_regs[SMBUS_REGS_CNT];
1819
1820 /* segment 10 */
1821 struct mpi_coredump_segment_header fc2_mbx_regs_seg_hdr;
1822 u32 fc2_mbx_regs[FC_MBX_REGS_CNT];
1823
1824 /* segment 11 */
1825 struct mpi_coredump_segment_header nic2_mbx_regs_seg_hdr;
1826 u32 nic2_mbx_regs[NIC_MBX_REGS_CNT];
1827
1828 /* segment 12 */
1829 struct mpi_coredump_segment_header i2c_regs_seg_hdr;
1830 u32 i2c_regs[I2C_REGS_CNT];
1831 /* segment 13 */
1832 struct mpi_coredump_segment_header memc_regs_seg_hdr;
1833 u32 memc_regs[MEMC_REGS_CNT];
1834
1835 /* segment 14 */
1836 struct mpi_coredump_segment_header pbus_regs_seg_hdr;
1837 u32 pbus_regs[PBUS_REGS_CNT];
1838
1839 /* segment 15 */
1840 struct mpi_coredump_segment_header mde_regs_seg_hdr;
1841 u32 mde_regs[MDE_REGS_CNT];
1842
1843 /* segment 16 */
1844 struct mpi_coredump_segment_header nic_regs_seg_hdr;
1845 u32 nic_regs[NIC_REGS_DUMP_WORD_COUNT];
1846
1847 /* segment 17 */
1848 struct mpi_coredump_segment_header nic2_regs_seg_hdr;
1849 u32 nic2_regs[NIC_REGS_DUMP_WORD_COUNT];
1850
1851 /* segment 18 */
1852 struct mpi_coredump_segment_header xgmac1_seg_hdr;
1853 u32 xgmac1[XGMAC_DUMP_WORD_COUNT];
1854
1855 /* segment 19 */
1856 struct mpi_coredump_segment_header xgmac2_seg_hdr;
1857 u32 xgmac2[XGMAC_DUMP_WORD_COUNT];
1858
1859 /* segment 20 */
1860 struct mpi_coredump_segment_header code_ram_seg_hdr;
1861 u32 code_ram[CODE_RAM_CNT];
1862
1863 /* segment 21 */
1864 struct mpi_coredump_segment_header memc_ram_seg_hdr;
1865 u32 memc_ram[MEMC_RAM_CNT];
1866
1867 /* segment 22 */
1868 struct mpi_coredump_segment_header xaui_an_hdr;
1869 u32 serdes_xaui_an[XG_SERDES_XAUI_AN_COUNT];
1870
1871 /* segment 23 */
1872 struct mpi_coredump_segment_header xaui_hss_pcs_hdr;
1873 u32 serdes_xaui_hss_pcs[XG_SERDES_XAUI_HSS_PCS_COUNT];
1874
1875 /* segment 24 */
1876 struct mpi_coredump_segment_header xfi_an_hdr;
1877 u32 serdes_xfi_an[XG_SERDES_XFI_AN_COUNT];
1878
1879 /* segment 25 */
1880 struct mpi_coredump_segment_header xfi_train_hdr;
1881 u32 serdes_xfi_train[XG_SERDES_XFI_TRAIN_COUNT];
1882
1883 /* segment 26 */
1884 struct mpi_coredump_segment_header xfi_hss_pcs_hdr;
1885 u32 serdes_xfi_hss_pcs[XG_SERDES_XFI_HSS_PCS_COUNT];
1886
1887 /* segment 27 */
1888 struct mpi_coredump_segment_header xfi_hss_tx_hdr;
1889 u32 serdes_xfi_hss_tx[XG_SERDES_XFI_HSS_TX_COUNT];
1890
1891 /* segment 28 */
1892 struct mpi_coredump_segment_header xfi_hss_rx_hdr;
1893 u32 serdes_xfi_hss_rx[XG_SERDES_XFI_HSS_RX_COUNT];
1894
1895 /* segment 29 */
1896 struct mpi_coredump_segment_header xfi_hss_pll_hdr;
1897 u32 serdes_xfi_hss_pll[XG_SERDES_XFI_HSS_PLL_COUNT];
1898
1899 /* segment 30 */
1900 struct mpi_coredump_segment_header misc_nic_seg_hdr;
1901 struct ql_nic_misc misc_nic_info;
1902
1903 /* segment 31 */
1904 /* one interrupt state for each CQ */
1905 struct mpi_coredump_segment_header intr_states_seg_hdr;
1906 u32 intr_states[MAX_RX_RINGS];
1907
1908 /* segment 32 */
1909 /* 3 cam words each for 16 unicast,
1910 * 2 cam words for each of 32 multicast.
1911 */
1912 struct mpi_coredump_segment_header cam_entries_seg_hdr;
1913 u32 cam_entries[(16 * 3) + (32 * 3)];
1914
1915 /* segment 33 */
1916 struct mpi_coredump_segment_header nic_routing_words_seg_hdr;
1917 u32 nic_routing_words[16];
1918 /* segment 34 */
1919 struct mpi_coredump_segment_header ets_seg_hdr;
1920 u32 ets[ETS_REGS_DUMP_WORD_COUNT];
1921
1922 /* segment 35 */
1923 struct mpi_coredump_segment_header probe_dump_seg_hdr;
1924 u32 probe_dump[PRB_MX_DUMP_TOT_COUNT];
1925
1926 /* segment 36 */
1927 struct mpi_coredump_segment_header routing_reg_seg_hdr;
1928 u32 routing_regs[RT_IDX_DUMP_TOT_WORDS];
1929
1930 /* segment 37 */
1931 struct mpi_coredump_segment_header mac_prot_reg_seg_hdr;
1932 u32 mac_prot_regs[MAC_ADDR_DUMP_TOT_WORDS];
1933
1934 /* segment 38 */
1935 struct mpi_coredump_segment_header xaui2_an_hdr;
1936 u32 serdes2_xaui_an[XG_SERDES_XAUI_AN_COUNT];
1937
1938 /* segment 39 */
1939 struct mpi_coredump_segment_header xaui2_hss_pcs_hdr;
1940 u32 serdes2_xaui_hss_pcs[XG_SERDES_XAUI_HSS_PCS_COUNT];
1941
1942 /* segment 40 */
1943 struct mpi_coredump_segment_header xfi2_an_hdr;
1944 u32 serdes2_xfi_an[XG_SERDES_XFI_AN_COUNT];
1945
1946 /* segment 41 */
1947 struct mpi_coredump_segment_header xfi2_train_hdr;
1948 u32 serdes2_xfi_train[XG_SERDES_XFI_TRAIN_COUNT];
1949
1950 /* segment 42 */
1951 struct mpi_coredump_segment_header xfi2_hss_pcs_hdr;
1952 u32 serdes2_xfi_hss_pcs[XG_SERDES_XFI_HSS_PCS_COUNT];
1953
1954 /* segment 43 */
1955 struct mpi_coredump_segment_header xfi2_hss_tx_hdr;
1956 u32 serdes2_xfi_hss_tx[XG_SERDES_XFI_HSS_TX_COUNT];
1957
1958 /* segment 44 */
1959 struct mpi_coredump_segment_header xfi2_hss_rx_hdr;
1960 u32 serdes2_xfi_hss_rx[XG_SERDES_XFI_HSS_RX_COUNT];
1961
1962 /* segment 45 */
1963 struct mpi_coredump_segment_header xfi2_hss_pll_hdr;
1964 u32 serdes2_xfi_hss_pll[XG_SERDES_XFI_HSS_PLL_COUNT];
1965
1966 /* segment 50 */
1967 /* semaphore register for all 5 functions */
1968 struct mpi_coredump_segment_header sem_regs_seg_hdr;
1969 u32 sem_regs[MAX_SEMAPHORE_FUNCTIONS];
1970};
1971
1571/* 1972/*
1572 * intr_context structure is used during initialization 1973 * intr_context structure is used during initialization
1573 * to hook the interrupts. It is also used in a single 1974 * to hook the interrupts. It is also used in a single
@@ -1603,6 +2004,7 @@ enum {
1603 QL_CAM_RT_SET = 8, 2004 QL_CAM_RT_SET = 8,
1604 QL_SELFTEST = 9, 2005 QL_SELFTEST = 9,
1605 QL_LB_LINK_UP = 10, 2006 QL_LB_LINK_UP = 10,
2007 QL_FRC_COREDUMP = 11,
1606}; 2008};
1607 2009
1608/* link_status bit definitions */ 2010/* link_status bit definitions */
@@ -1724,6 +2126,8 @@ struct ql_adapter {
1724 u32 port_link_up; 2126 u32 port_link_up;
1725 u32 port_init; 2127 u32 port_init;
1726 u32 link_status; 2128 u32 link_status;
2129 struct ql_mpi_coredump *mpi_coredump;
2130 u32 core_is_dumped;
1727 u32 link_config; 2131 u32 link_config;
1728 u32 led_config; 2132 u32 led_config;
1729 u32 max_frame_size; 2133 u32 max_frame_size;
@@ -1736,6 +2140,7 @@ struct ql_adapter {
1736 struct delayed_work mpi_work; 2140 struct delayed_work mpi_work;
1737 struct delayed_work mpi_port_cfg_work; 2141 struct delayed_work mpi_port_cfg_work;
1738 struct delayed_work mpi_idc_work; 2142 struct delayed_work mpi_idc_work;
2143 struct delayed_work mpi_core_to_log;
1739 struct completion ide_completion; 2144 struct completion ide_completion;
1740 struct nic_operations *nic_ops; 2145 struct nic_operations *nic_ops;
1741 u16 device_id; 2146 u16 device_id;
@@ -1807,6 +2212,7 @@ extern int ql_write_cfg(struct ql_adapter *qdev, void *ptr, int size, u32 bit,
1807void ql_queue_fw_error(struct ql_adapter *qdev); 2212void ql_queue_fw_error(struct ql_adapter *qdev);
1808void ql_mpi_work(struct work_struct *work); 2213void ql_mpi_work(struct work_struct *work);
1809void ql_mpi_reset_work(struct work_struct *work); 2214void ql_mpi_reset_work(struct work_struct *work);
2215void ql_mpi_core_to_log(struct work_struct *work);
1810int ql_wait_reg_rdy(struct ql_adapter *qdev, u32 reg, u32 bit, u32 ebit); 2216int ql_wait_reg_rdy(struct ql_adapter *qdev, u32 reg, u32 bit, u32 ebit);
1811void ql_queue_asic_error(struct ql_adapter *qdev); 2217void ql_queue_asic_error(struct ql_adapter *qdev);
1812u32 ql_enable_completion_interrupt(struct ql_adapter *qdev, u32 intr); 2218u32 ql_enable_completion_interrupt(struct ql_adapter *qdev, u32 intr);
@@ -1817,6 +2223,15 @@ void ql_mpi_port_cfg_work(struct work_struct *work);
1817int ql_mb_get_fw_state(struct ql_adapter *qdev); 2223int ql_mb_get_fw_state(struct ql_adapter *qdev);
1818int ql_cam_route_initialize(struct ql_adapter *qdev); 2224int ql_cam_route_initialize(struct ql_adapter *qdev);
1819int ql_read_mpi_reg(struct ql_adapter *qdev, u32 reg, u32 *data); 2225int ql_read_mpi_reg(struct ql_adapter *qdev, u32 reg, u32 *data);
2226int ql_write_mpi_reg(struct ql_adapter *qdev, u32 reg, u32 data);
2227int ql_unpause_mpi_risc(struct ql_adapter *qdev);
2228int ql_pause_mpi_risc(struct ql_adapter *qdev);
2229int ql_hard_reset_mpi_risc(struct ql_adapter *qdev);
2230int ql_dump_risc_ram_area(struct ql_adapter *qdev, void *buf,
2231 u32 ram_addr, int word_count);
2232int ql_core_dump(struct ql_adapter *qdev,
2233 struct ql_mpi_coredump *mpi_coredump);
2234int ql_mb_sys_err(struct ql_adapter *qdev);
1820int ql_mb_about_fw(struct ql_adapter *qdev); 2235int ql_mb_about_fw(struct ql_adapter *qdev);
1821int ql_wol(struct ql_adapter *qdev); 2236int ql_wol(struct ql_adapter *qdev);
1822int ql_mb_wol_set_magic(struct ql_adapter *qdev, u32 enable_wol); 2237int ql_mb_wol_set_magic(struct ql_adapter *qdev, u32 enable_wol);
@@ -1833,6 +2248,7 @@ void ql_gen_reg_dump(struct ql_adapter *qdev,
1833 struct ql_reg_dump *mpi_coredump); 2248 struct ql_reg_dump *mpi_coredump);
1834netdev_tx_t ql_lb_send(struct sk_buff *skb, struct net_device *ndev); 2249netdev_tx_t ql_lb_send(struct sk_buff *skb, struct net_device *ndev);
1835void ql_check_lb_frame(struct ql_adapter *, struct sk_buff *); 2250void ql_check_lb_frame(struct ql_adapter *, struct sk_buff *);
2251int ql_own_firmware(struct ql_adapter *qdev);
1836int ql_clean_lb_rx_ring(struct rx_ring *rx_ring, int budget); 2252int ql_clean_lb_rx_ring(struct rx_ring *rx_ring, int budget);
1837 2253
1838#if 1 2254#if 1
diff --git a/drivers/net/qlge/qlge_dbg.c b/drivers/net/qlge/qlge_dbg.c
index 9f58c471076..57df835147e 100644
--- a/drivers/net/qlge/qlge_dbg.c
+++ b/drivers/net/qlge/qlge_dbg.c
@@ -1,5 +1,405 @@
1#include "qlge.h" 1#include "qlge.h"
2 2
3/* Read a NIC register from the alternate function. */
4static u32 ql_read_other_func_reg(struct ql_adapter *qdev,
5 u32 reg)
6{
7 u32 register_to_read;
8 u32 reg_val;
9 unsigned int status = 0;
10
11 register_to_read = MPI_NIC_REG_BLOCK
12 | MPI_NIC_READ
13 | (qdev->alt_func << MPI_NIC_FUNCTION_SHIFT)
14 | reg;
15 status = ql_read_mpi_reg(qdev, register_to_read, &reg_val);
16 if (status != 0)
17 return 0xffffffff;
18
19 return reg_val;
20}
21
22/* Write a NIC register on the alternate function. */
23static int ql_write_other_func_reg(struct ql_adapter *qdev,
24 u32 reg, u32 reg_val)
25{
26 u32 register_to_read;
27 int status = 0;
28
29 register_to_read = MPI_NIC_REG_BLOCK
30 | MPI_NIC_READ
31 | (qdev->alt_func << MPI_NIC_FUNCTION_SHIFT)
32 | reg;
33 status = ql_write_mpi_reg(qdev, register_to_read, reg_val);
34
35 return status;
36}
37
38static int ql_wait_other_func_reg_rdy(struct ql_adapter *qdev, u32 reg,
39 u32 bit, u32 err_bit)
40{
41 u32 temp;
42 int count = 10;
43
44 while (count) {
45 temp = ql_read_other_func_reg(qdev, reg);
46
47 /* check for errors */
48 if (temp & err_bit)
49 return -1;
50 else if (temp & bit)
51 return 0;
52 mdelay(10);
53 count--;
54 }
55 return -1;
56}
57
58static int ql_read_other_func_serdes_reg(struct ql_adapter *qdev, u32 reg,
59 u32 *data)
60{
61 int status;
62
63 /* wait for reg to come ready */
64 status = ql_wait_other_func_reg_rdy(qdev, XG_SERDES_ADDR / 4,
65 XG_SERDES_ADDR_RDY, 0);
66 if (status)
67 goto exit;
68
69 /* set up for reg read */
70 ql_write_other_func_reg(qdev, XG_SERDES_ADDR/4, reg | PROC_ADDR_R);
71
72 /* wait for reg to come ready */
73 status = ql_wait_other_func_reg_rdy(qdev, XG_SERDES_ADDR / 4,
74 XG_SERDES_ADDR_RDY, 0);
75 if (status)
76 goto exit;
77
78 /* get the data */
79 *data = ql_read_other_func_reg(qdev, (XG_SERDES_DATA / 4));
80exit:
81 return status;
82}
83
84/* Read out the SERDES registers */
85static int ql_read_serdes_reg(struct ql_adapter *qdev, u32 reg, u32 * data)
86{
87 int status;
88
89 /* wait for reg to come ready */
90 status = ql_wait_reg_rdy(qdev, XG_SERDES_ADDR, XG_SERDES_ADDR_RDY, 0);
91 if (status)
92 goto exit;
93
94 /* set up for reg read */
95 ql_write32(qdev, XG_SERDES_ADDR, reg | PROC_ADDR_R);
96
97 /* wait for reg to come ready */
98 status = ql_wait_reg_rdy(qdev, XG_SERDES_ADDR, XG_SERDES_ADDR_RDY, 0);
99 if (status)
100 goto exit;
101
102 /* get the data */
103 *data = ql_read32(qdev, XG_SERDES_DATA);
104exit:
105 return status;
106}
107
108static void ql_get_both_serdes(struct ql_adapter *qdev, u32 addr,
109 u32 *direct_ptr, u32 *indirect_ptr,
110 unsigned int direct_valid, unsigned int indirect_valid)
111{
112 unsigned int status;
113
114 status = 1;
115 if (direct_valid)
116 status = ql_read_serdes_reg(qdev, addr, direct_ptr);
117 /* Dead fill any failures or invalids. */
118 if (status)
119 *direct_ptr = 0xDEADBEEF;
120
121 status = 1;
122 if (indirect_valid)
123 status = ql_read_other_func_serdes_reg(
124 qdev, addr, indirect_ptr);
125 /* Dead fill any failures or invalids. */
126 if (status)
127 *indirect_ptr = 0xDEADBEEF;
128}
129
130static int ql_get_serdes_regs(struct ql_adapter *qdev,
131 struct ql_mpi_coredump *mpi_coredump)
132{
133 int status;
134 unsigned int xfi_direct_valid, xfi_indirect_valid, xaui_direct_valid;
135 unsigned int xaui_indirect_valid, i;
136 u32 *direct_ptr, temp;
137 u32 *indirect_ptr;
138
139 xfi_direct_valid = xfi_indirect_valid = 0;
140 xaui_direct_valid = xaui_indirect_valid = 1;
141
142 /* The XAUI needs to be read out per port */
143 if (qdev->func & 1) {
144 /* We are NIC 2 */
145 status = ql_read_other_func_serdes_reg(qdev,
146 XG_SERDES_XAUI_HSS_PCS_START, &temp);
147 if (status)
148 temp = XG_SERDES_ADDR_XAUI_PWR_DOWN;
149 if ((temp & XG_SERDES_ADDR_XAUI_PWR_DOWN) ==
150 XG_SERDES_ADDR_XAUI_PWR_DOWN)
151 xaui_indirect_valid = 0;
152
153 status = ql_read_serdes_reg(qdev,
154 XG_SERDES_XAUI_HSS_PCS_START, &temp);
155 if (status)
156 temp = XG_SERDES_ADDR_XAUI_PWR_DOWN;
157
158 if ((temp & XG_SERDES_ADDR_XAUI_PWR_DOWN) ==
159 XG_SERDES_ADDR_XAUI_PWR_DOWN)
160 xaui_direct_valid = 0;
161 } else {
162 /* We are NIC 1 */
163 status = ql_read_other_func_serdes_reg(qdev,
164 XG_SERDES_XAUI_HSS_PCS_START, &temp);
165 if (status)
166 temp = XG_SERDES_ADDR_XAUI_PWR_DOWN;
167 if ((temp & XG_SERDES_ADDR_XAUI_PWR_DOWN) ==
168 XG_SERDES_ADDR_XAUI_PWR_DOWN)
169 xaui_indirect_valid = 0;
170
171 status = ql_read_serdes_reg(qdev,
172 XG_SERDES_XAUI_HSS_PCS_START, &temp);
173 if (status)
174 temp = XG_SERDES_ADDR_XAUI_PWR_DOWN;
175 if ((temp & XG_SERDES_ADDR_XAUI_PWR_DOWN) ==
176 XG_SERDES_ADDR_XAUI_PWR_DOWN)
177 xaui_direct_valid = 0;
178 }
179
180 /*
181	 * The XFI register is shared, so we only need to read one
182	 * function and then check the bits.
183 */
184 status = ql_read_serdes_reg(qdev, XG_SERDES_ADDR_STS, &temp);
185 if (status)
186 temp = 0;
187
188 if ((temp & XG_SERDES_ADDR_XFI1_PWR_UP) ==
189 XG_SERDES_ADDR_XFI1_PWR_UP) {
190 /* now see if i'm NIC 1 or NIC 2 */
191 if (qdev->func & 1)
192 /* I'm NIC 2, so the indirect (NIC1) xfi is up. */
193 xfi_indirect_valid = 1;
194 else
195 xfi_direct_valid = 1;
196 }
197 if ((temp & XG_SERDES_ADDR_XFI2_PWR_UP) ==
198 XG_SERDES_ADDR_XFI2_PWR_UP) {
199 /* now see if i'm NIC 1 or NIC 2 */
200 if (qdev->func & 1)
201 /* I'm NIC 2, so the indirect (NIC1) xfi is up. */
202 xfi_direct_valid = 1;
203 else
204 xfi_indirect_valid = 1;
205 }
206
207 /* Get XAUI_AN register block. */
208 if (qdev->func & 1) {
209 /* Function 2 is direct */
210 direct_ptr = mpi_coredump->serdes2_xaui_an;
211 indirect_ptr = mpi_coredump->serdes_xaui_an;
212 } else {
213 /* Function 1 is direct */
214 direct_ptr = mpi_coredump->serdes_xaui_an;
215 indirect_ptr = mpi_coredump->serdes2_xaui_an;
216 }
217
218 for (i = 0; i <= 0x000000034; i += 4, direct_ptr++, indirect_ptr++)
219 ql_get_both_serdes(qdev, i, direct_ptr, indirect_ptr,
220 xaui_direct_valid, xaui_indirect_valid);
221
222 /* Get XAUI_HSS_PCS register block. */
223 if (qdev->func & 1) {
224 direct_ptr =
225 mpi_coredump->serdes2_xaui_hss_pcs;
226 indirect_ptr =
227 mpi_coredump->serdes_xaui_hss_pcs;
228 } else {
229 direct_ptr =
230 mpi_coredump->serdes_xaui_hss_pcs;
231 indirect_ptr =
232 mpi_coredump->serdes2_xaui_hss_pcs;
233 }
234
235 for (i = 0x800; i <= 0x880; i += 4, direct_ptr++, indirect_ptr++)
236 ql_get_both_serdes(qdev, i, direct_ptr, indirect_ptr,
237 xaui_direct_valid, xaui_indirect_valid);
238
239 /* Get XAUI_XFI_AN register block. */
240 if (qdev->func & 1) {
241 direct_ptr = mpi_coredump->serdes2_xfi_an;
242 indirect_ptr = mpi_coredump->serdes_xfi_an;
243 } else {
244 direct_ptr = mpi_coredump->serdes_xfi_an;
245 indirect_ptr = mpi_coredump->serdes2_xfi_an;
246 }
247
248 for (i = 0x1000; i <= 0x1034; i += 4, direct_ptr++, indirect_ptr++)
249 ql_get_both_serdes(qdev, i, direct_ptr, indirect_ptr,
250 xfi_direct_valid, xfi_indirect_valid);
251
252 /* Get XAUI_XFI_TRAIN register block. */
253 if (qdev->func & 1) {
254 direct_ptr = mpi_coredump->serdes2_xfi_train;
255 indirect_ptr =
256 mpi_coredump->serdes_xfi_train;
257 } else {
258 direct_ptr = mpi_coredump->serdes_xfi_train;
259 indirect_ptr =
260 mpi_coredump->serdes2_xfi_train;
261 }
262
263 for (i = 0x1050; i <= 0x107c; i += 4, direct_ptr++, indirect_ptr++)
264 ql_get_both_serdes(qdev, i, direct_ptr, indirect_ptr,
265 xfi_direct_valid, xfi_indirect_valid);
266
267 /* Get XAUI_XFI_HSS_PCS register block. */
268 if (qdev->func & 1) {
269 direct_ptr =
270 mpi_coredump->serdes2_xfi_hss_pcs;
271 indirect_ptr =
272 mpi_coredump->serdes_xfi_hss_pcs;
273 } else {
274 direct_ptr =
275 mpi_coredump->serdes_xfi_hss_pcs;
276 indirect_ptr =
277 mpi_coredump->serdes2_xfi_hss_pcs;
278 }
279
280 for (i = 0x1800; i <= 0x1838; i += 4, direct_ptr++, indirect_ptr++)
281 ql_get_both_serdes(qdev, i, direct_ptr, indirect_ptr,
282 xfi_direct_valid, xfi_indirect_valid);
283
284 /* Get XAUI_XFI_HSS_TX register block. */
285 if (qdev->func & 1) {
286 direct_ptr =
287 mpi_coredump->serdes2_xfi_hss_tx;
288 indirect_ptr =
289 mpi_coredump->serdes_xfi_hss_tx;
290 } else {
291 direct_ptr = mpi_coredump->serdes_xfi_hss_tx;
292 indirect_ptr =
293 mpi_coredump->serdes2_xfi_hss_tx;
294 }
295 for (i = 0x1c00; i <= 0x1c1f; i++, direct_ptr++, indirect_ptr++)
296 ql_get_both_serdes(qdev, i, direct_ptr, indirect_ptr,
297 xfi_direct_valid, xfi_indirect_valid);
298
299 /* Get XAUI_XFI_HSS_RX register block. */
300 if (qdev->func & 1) {
301 direct_ptr =
302 mpi_coredump->serdes2_xfi_hss_rx;
303 indirect_ptr =
304 mpi_coredump->serdes_xfi_hss_rx;
305 } else {
306 direct_ptr = mpi_coredump->serdes_xfi_hss_rx;
307 indirect_ptr =
308 mpi_coredump->serdes2_xfi_hss_rx;
309 }
310
311 for (i = 0x1c40; i <= 0x1c5f; i++, direct_ptr++, indirect_ptr++)
312 ql_get_both_serdes(qdev, i, direct_ptr, indirect_ptr,
313 xfi_direct_valid, xfi_indirect_valid);
314
315
316 /* Get XAUI_XFI_HSS_PLL register block. */
317 if (qdev->func & 1) {
318 direct_ptr =
319 mpi_coredump->serdes2_xfi_hss_pll;
320 indirect_ptr =
321 mpi_coredump->serdes_xfi_hss_pll;
322 } else {
323 direct_ptr =
324 mpi_coredump->serdes_xfi_hss_pll;
325 indirect_ptr =
326 mpi_coredump->serdes2_xfi_hss_pll;
327 }
328 for (i = 0x1e00; i <= 0x1e1f; i++, direct_ptr++, indirect_ptr++)
329 ql_get_both_serdes(qdev, i, direct_ptr, indirect_ptr,
330 xfi_direct_valid, xfi_indirect_valid);
331 return 0;
332}
333
334static int ql_read_other_func_xgmac_reg(struct ql_adapter *qdev, u32 reg,
335 u32 *data)
336{
337 int status = 0;
338
339 /* wait for reg to come ready */
340 status = ql_wait_other_func_reg_rdy(qdev, XGMAC_ADDR / 4,
341 XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
342 if (status)
343 goto exit;
344
345 /* set up for reg read */
346 ql_write_other_func_reg(qdev, XGMAC_ADDR / 4, reg | XGMAC_ADDR_R);
347
348 /* wait for reg to come ready */
349 status = ql_wait_other_func_reg_rdy(qdev, XGMAC_ADDR / 4,
350 XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
351 if (status)
352 goto exit;
353
354 /* get the data */
355 *data = ql_read_other_func_reg(qdev, XGMAC_DATA / 4);
356exit:
357 return status;
358}
359
360/* Read the 400 xgmac control/statistics registers
361 * skipping unused locations.
362 */
363static int ql_get_xgmac_regs(struct ql_adapter *qdev, u32 * buf,
364 unsigned int other_function)
365{
366 int status = 0;
367 int i;
368
369 for (i = PAUSE_SRC_LO; i < XGMAC_REGISTER_END; i += 4, buf++) {
370 /* We're reading 400 xgmac registers, but we filter out
371 * several locations that are non-responsive to reads.
372 */
373 if ((i == 0x00000114) ||
374 (i == 0x00000118) ||
375 (i == 0x0000013c) ||
376 (i == 0x00000140) ||
377 (i > 0x00000150 && i < 0x000001fc) ||
378 (i > 0x00000278 && i < 0x000002a0) ||
379 (i > 0x000002c0 && i < 0x000002cf) ||
380 (i > 0x000002dc && i < 0x000002f0) ||
381 (i > 0x000003c8 && i < 0x00000400) ||
382 (i > 0x00000400 && i < 0x00000410) ||
383 (i > 0x00000410 && i < 0x00000420) ||
384 (i > 0x00000420 && i < 0x00000430) ||
385 (i > 0x00000430 && i < 0x00000440) ||
386 (i > 0x00000440 && i < 0x00000450) ||
387 (i > 0x00000450 && i < 0x00000500) ||
388 (i > 0x0000054c && i < 0x00000568) ||
389 (i > 0x000005c8 && i < 0x00000600))
390 continue;
391
392 if (other_function)
393 status =
394 ql_read_other_func_xgmac_reg(qdev, i, buf);
395 else
396 status = ql_read_xgmac_reg(qdev, i, buf);
397
398 if (status)
399 *buf = 0xdeadbeef;
400 }
401 return status;
402}
3 403
4static int ql_get_ets_regs(struct ql_adapter *qdev, u32 * buf) 404static int ql_get_ets_regs(struct ql_adapter *qdev, u32 * buf)
5{ 405{
@@ -91,6 +491,226 @@ err:
91 return status; 491 return status;
92} 492}
93 493
494/* Read the MPI Processor shadow registers */
495static int ql_get_mpi_shadow_regs(struct ql_adapter *qdev, u32 * buf)
496{
497 u32 i;
498 int status;
499
500 for (i = 0; i < MPI_CORE_SH_REGS_CNT; i++, buf++) {
501 status = ql_write_mpi_reg(qdev, RISC_124,
502 (SHADOW_OFFSET | i << SHADOW_REG_SHIFT));
503 if (status)
504 goto end;
505 status = ql_read_mpi_reg(qdev, RISC_127, buf);
506 if (status)
507 goto end;
508 }
509end:
510 return status;
511}
512
513/* Read the MPI Processor core registers */
514static int ql_get_mpi_regs(struct ql_adapter *qdev, u32 * buf,
515 u32 offset, u32 count)
516{
517 int i, status = 0;
518 for (i = 0; i < count; i++, buf++) {
519 status = ql_read_mpi_reg(qdev, offset + i, buf);
520 if (status)
521 return status;
522 }
523 return status;
524}
525
526/* Read the ASIC probe dump */
527static unsigned int *ql_get_probe(struct ql_adapter *qdev, u32 clock,
528 u32 valid, u32 *buf)
529{
530 u32 module, mux_sel, probe, lo_val, hi_val;
531
532 for (module = 0; module < PRB_MX_ADDR_MAX_MODS; module++) {
533 if (!((valid >> module) & 1))
534 continue;
535 for (mux_sel = 0; mux_sel < PRB_MX_ADDR_MAX_MUX; mux_sel++) {
536 probe = clock
537 | PRB_MX_ADDR_ARE
538 | mux_sel
539 | (module << PRB_MX_ADDR_MOD_SEL_SHIFT);
540 ql_write32(qdev, PRB_MX_ADDR, probe);
541 lo_val = ql_read32(qdev, PRB_MX_DATA);
542 if (mux_sel == 0) {
543 *buf = probe;
544 buf++;
545 }
546 probe |= PRB_MX_ADDR_UP;
547 ql_write32(qdev, PRB_MX_ADDR, probe);
548 hi_val = ql_read32(qdev, PRB_MX_DATA);
549 *buf = lo_val;
550 buf++;
551 *buf = hi_val;
552 buf++;
553 }
554 }
555 return buf;
556}
557
558static int ql_get_probe_dump(struct ql_adapter *qdev, unsigned int *buf)
559{
560 /* First we have to enable the probe mux */
561 ql_write_mpi_reg(qdev, MPI_TEST_FUNC_PRB_CTL, MPI_TEST_FUNC_PRB_EN);
562 buf = ql_get_probe(qdev, PRB_MX_ADDR_SYS_CLOCK,
563 PRB_MX_ADDR_VALID_SYS_MOD, buf);
564 buf = ql_get_probe(qdev, PRB_MX_ADDR_PCI_CLOCK,
565 PRB_MX_ADDR_VALID_PCI_MOD, buf);
566 buf = ql_get_probe(qdev, PRB_MX_ADDR_XGM_CLOCK,
567 PRB_MX_ADDR_VALID_XGM_MOD, buf);
568 buf = ql_get_probe(qdev, PRB_MX_ADDR_FC_CLOCK,
569 PRB_MX_ADDR_VALID_FC_MOD, buf);
570 return 0;
571
572}
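A small sketch of the buffer layout ql_get_probe() produces (the helper name is hypothetical; the constants are the ones used above): each module selected by the valid mask contributes one header word, the mux-0 probe address, followed by a low/high data pair for every mux selector.

/* Sketch only: 32-bit words one clock domain contributes to probe_dump. */
static unsigned int probe_words_per_clock(u32 valid)
{
        /* one header word per valid module plus a lo/hi pair per mux */
        return hweight32(valid) * (1 + 2 * PRB_MX_ADDR_MAX_MUX);
}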
573
574/* Read out the routing index registers */
575static int ql_get_routing_index_registers(struct ql_adapter *qdev, u32 *buf)
576{
577 int status;
578 u32 type, index, index_max;
579 u32 result_index;
580 u32 result_data;
581 u32 val;
582
583 status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
584 if (status)
585 return status;
586
587 for (type = 0; type < 4; type++) {
588 if (type < 2)
589 index_max = 8;
590 else
591 index_max = 16;
592 for (index = 0; index < index_max; index++) {
593 val = RT_IDX_RS
594 | (type << RT_IDX_TYPE_SHIFT)
595 | (index << RT_IDX_IDX_SHIFT);
596 ql_write32(qdev, RT_IDX, val);
597 result_index = 0;
598 while ((result_index & RT_IDX_MR) == 0)
599 result_index = ql_read32(qdev, RT_IDX);
600 result_data = ql_read32(qdev, RT_DATA);
601 *buf = type;
602 buf++;
603 *buf = index;
604 buf++;
605 *buf = result_index;
606 buf++;
607 *buf = result_data;
608 buf++;
609 }
610 }
611 ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
612 return status;
613}
614
615/* Read out the MAC protocol registers */
616static void ql_get_mac_protocol_registers(struct ql_adapter *qdev, u32 *buf)
617{
618 u32 result_index, result_data;
619 u32 type;
620 u32 index;
621 u32 offset;
622 u32 val;
623 u32 initial_val = MAC_ADDR_RS;
624 u32 max_index;
625 u32 max_offset;
626
627 for (type = 0; type < MAC_ADDR_TYPE_COUNT; type++) {
628 switch (type) {
629
630 case 0: /* CAM */
631 initial_val |= MAC_ADDR_ADR;
632 max_index = MAC_ADDR_MAX_CAM_ENTRIES;
633 max_offset = MAC_ADDR_MAX_CAM_WCOUNT;
634 break;
635 case 1: /* Multicast MAC Address */
636 max_index = MAC_ADDR_MAX_CAM_WCOUNT;
637 max_offset = MAC_ADDR_MAX_CAM_WCOUNT;
638 break;
639 case 2: /* VLAN filter mask */
640 case 3: /* MC filter mask */
641 max_index = MAC_ADDR_MAX_CAM_WCOUNT;
642 max_offset = MAC_ADDR_MAX_CAM_WCOUNT;
643 break;
644 case 4: /* FC MAC addresses */
645 max_index = MAC_ADDR_MAX_FC_MAC_ENTRIES;
646 max_offset = MAC_ADDR_MAX_FC_MAC_WCOUNT;
647 break;
648 case 5: /* Mgmt MAC addresses */
649 max_index = MAC_ADDR_MAX_MGMT_MAC_ENTRIES;
650 max_offset = MAC_ADDR_MAX_MGMT_MAC_WCOUNT;
651 break;
652 case 6: /* Mgmt VLAN addresses */
653 max_index = MAC_ADDR_MAX_MGMT_VLAN_ENTRIES;
654 max_offset = MAC_ADDR_MAX_MGMT_VLAN_WCOUNT;
655 break;
656 case 7: /* Mgmt IPv4 address */
657 max_index = MAC_ADDR_MAX_MGMT_V4_ENTRIES;
658 max_offset = MAC_ADDR_MAX_MGMT_V4_WCOUNT;
659 break;
660 case 8: /* Mgmt IPv6 address */
661 max_index = MAC_ADDR_MAX_MGMT_V6_ENTRIES;
662 max_offset = MAC_ADDR_MAX_MGMT_V6_WCOUNT;
663 break;
664 case 9: /* Mgmt TCP/UDP Dest port */
665 max_index = MAC_ADDR_MAX_MGMT_TU_DP_ENTRIES;
666 max_offset = MAC_ADDR_MAX_MGMT_TU_DP_WCOUNT;
667 break;
668 default:
669 printk(KERN_ERR"Bad type!!! 0x%08x\n", type);
670 max_index = 0;
671 max_offset = 0;
672 break;
673 }
674 for (index = 0; index < max_index; index++) {
675 for (offset = 0; offset < max_offset; offset++) {
676 val = initial_val
677 | (type << MAC_ADDR_TYPE_SHIFT)
678 | (index << MAC_ADDR_IDX_SHIFT)
679 | (offset);
680 ql_write32(qdev, MAC_ADDR_IDX, val);
681 result_index = 0;
682 while ((result_index & MAC_ADDR_MR) == 0) {
683 result_index = ql_read32(qdev,
684 MAC_ADDR_IDX);
685 }
686 result_data = ql_read32(qdev, MAC_ADDR_DATA);
687 *buf = result_index;
688 buf++;
689 *buf = result_data;
690 buf++;
691 }
692 }
693 }
694}
695
696static void ql_get_sem_registers(struct ql_adapter *qdev, u32 *buf)
697{
698 u32 func_num, reg, reg_val;
699 int status;
700
701 for (func_num = 0; func_num < MAX_SEMAPHORE_FUNCTIONS ; func_num++) {
702 reg = MPI_NIC_REG_BLOCK
703 | (func_num << MPI_NIC_FUNCTION_SHIFT)
704 | (SEM / 4);
705 status = ql_read_mpi_reg(qdev, reg, &reg_val);
706 *buf = reg_val;
707 /* if the read failed then dead fill the element. */
708 if (status)
709 *buf = 0xdeadbeef;
710 buf++;
711 }
712}
713
94/* Create a coredump segment header */ 714/* Create a coredump segment header */
95static void ql_build_coredump_seg_header( 715static void ql_build_coredump_seg_header(
96 struct mpi_coredump_segment_header *seg_hdr, 716 struct mpi_coredump_segment_header *seg_hdr,
@@ -103,6 +723,527 @@ static void ql_build_coredump_seg_header(
103 memcpy(seg_hdr->description, desc, (sizeof(seg_hdr->description)) - 1); 723 memcpy(seg_hdr->description, desc, (sizeof(seg_hdr->description)) - 1);
104} 724}
105 725
726/*
727 * This function should be called when a coredump / probedump
728 * is to be extracted from the HBA. It is assumed there is a
729 * qdev structure that contains the base address of the register
730 * space for this function as well as a coredump structure that
731 * will contain the dump.
732 */
733int ql_core_dump(struct ql_adapter *qdev, struct ql_mpi_coredump *mpi_coredump)
734{
735 int status;
736 int i;
737
738 if (!mpi_coredump) {
739 QPRINTK(qdev, DRV, ERR,
740 "No memory available.\n");
741 return -ENOMEM;
742 }
743
744 /* Try to get the spinlock, but don't worry if
745 * it isn't available. If the firmware died it
746 * might be holding the sem.
747 */
748 ql_sem_spinlock(qdev, SEM_PROC_REG_MASK);
749
750 status = ql_pause_mpi_risc(qdev);
751 if (status) {
752 QPRINTK(qdev, DRV, ERR,
753 "Failed RISC pause. Status = 0x%.08x\n", status);
754 goto err;
755 }
756
757 /* Insert the global header */
758 memset(&(mpi_coredump->mpi_global_header), 0,
759 sizeof(struct mpi_coredump_global_header));
760 mpi_coredump->mpi_global_header.cookie = MPI_COREDUMP_COOKIE;
761 mpi_coredump->mpi_global_header.headerSize =
762 sizeof(struct mpi_coredump_global_header);
763 mpi_coredump->mpi_global_header.imageSize =
764 sizeof(struct ql_mpi_coredump);
765 memcpy(mpi_coredump->mpi_global_header.idString, "MPI Coredump",
766 sizeof(mpi_coredump->mpi_global_header.idString));
767
768 /* Get generic NIC reg dump */
769 ql_build_coredump_seg_header(&mpi_coredump->nic_regs_seg_hdr,
770 NIC1_CONTROL_SEG_NUM,
771 sizeof(struct mpi_coredump_segment_header) +
772 sizeof(mpi_coredump->nic_regs), "NIC1 Registers");
773
774 ql_build_coredump_seg_header(&mpi_coredump->nic2_regs_seg_hdr,
775 NIC2_CONTROL_SEG_NUM,
776 sizeof(struct mpi_coredump_segment_header) +
777 sizeof(mpi_coredump->nic2_regs), "NIC2 Registers");
778
779 /* Get XGMac registers. (Segment 18, Rev C. step 21) */
780 ql_build_coredump_seg_header(&mpi_coredump->xgmac1_seg_hdr,
781 NIC1_XGMAC_SEG_NUM,
782 sizeof(struct mpi_coredump_segment_header) +
783 sizeof(mpi_coredump->xgmac1), "NIC1 XGMac Registers");
784
785 ql_build_coredump_seg_header(&mpi_coredump->xgmac2_seg_hdr,
786 NIC2_XGMAC_SEG_NUM,
787 sizeof(struct mpi_coredump_segment_header) +
788 sizeof(mpi_coredump->xgmac2), "NIC2 XGMac Registers");
789
790 if (qdev->func & 1) {
791 /* Odd means our function is NIC 2 */
792 for (i = 0; i < NIC_REGS_DUMP_WORD_COUNT; i++)
793 mpi_coredump->nic2_regs[i] =
794 ql_read32(qdev, i * sizeof(u32));
795
796 for (i = 0; i < NIC_REGS_DUMP_WORD_COUNT; i++)
797 mpi_coredump->nic_regs[i] =
798 ql_read_other_func_reg(qdev, (i * sizeof(u32)) / 4);
799
800 ql_get_xgmac_regs(qdev, &mpi_coredump->xgmac2[0], 0);
801 ql_get_xgmac_regs(qdev, &mpi_coredump->xgmac1[0], 1);
802 } else {
803 /* Even means our function is NIC 1 */
804 for (i = 0; i < NIC_REGS_DUMP_WORD_COUNT; i++)
805 mpi_coredump->nic_regs[i] =
806 ql_read32(qdev, i * sizeof(u32));
807 for (i = 0; i < NIC_REGS_DUMP_WORD_COUNT; i++)
808 mpi_coredump->nic2_regs[i] =
809 ql_read_other_func_reg(qdev, (i * sizeof(u32)) / 4);
810
811 ql_get_xgmac_regs(qdev, &mpi_coredump->xgmac1[0], 0);
812 ql_get_xgmac_regs(qdev, &mpi_coredump->xgmac2[0], 1);
813 }
814
815 /* Rev C. Step 20a */
816 ql_build_coredump_seg_header(&mpi_coredump->xaui_an_hdr,
817 XAUI_AN_SEG_NUM,
818 sizeof(struct mpi_coredump_segment_header) +
819 sizeof(mpi_coredump->serdes_xaui_an),
820 "XAUI AN Registers");
821
822 /* Rev C. Step 20b */
823 ql_build_coredump_seg_header(&mpi_coredump->xaui_hss_pcs_hdr,
824 XAUI_HSS_PCS_SEG_NUM,
825 sizeof(struct mpi_coredump_segment_header) +
826 sizeof(mpi_coredump->serdes_xaui_hss_pcs),
827 "XAUI HSS PCS Registers");
828
829 ql_build_coredump_seg_header(&mpi_coredump->xfi_an_hdr, XFI_AN_SEG_NUM,
830 sizeof(struct mpi_coredump_segment_header) +
831 sizeof(mpi_coredump->serdes_xfi_an),
832 "XFI AN Registers");
833
834 ql_build_coredump_seg_header(&mpi_coredump->xfi_train_hdr,
835 XFI_TRAIN_SEG_NUM,
836 sizeof(struct mpi_coredump_segment_header) +
837 sizeof(mpi_coredump->serdes_xfi_train),
838 "XFI TRAIN Registers");
839
840 ql_build_coredump_seg_header(&mpi_coredump->xfi_hss_pcs_hdr,
841 XFI_HSS_PCS_SEG_NUM,
842 sizeof(struct mpi_coredump_segment_header) +
843 sizeof(mpi_coredump->serdes_xfi_hss_pcs),
844 "XFI HSS PCS Registers");
845
846 ql_build_coredump_seg_header(&mpi_coredump->xfi_hss_tx_hdr,
847 XFI_HSS_TX_SEG_NUM,
848 sizeof(struct mpi_coredump_segment_header) +
849 sizeof(mpi_coredump->serdes_xfi_hss_tx),
850 "XFI HSS TX Registers");
851
852 ql_build_coredump_seg_header(&mpi_coredump->xfi_hss_rx_hdr,
853 XFI_HSS_RX_SEG_NUM,
854 sizeof(struct mpi_coredump_segment_header) +
855 sizeof(mpi_coredump->serdes_xfi_hss_rx),
856 "XFI HSS RX Registers");
857
858 ql_build_coredump_seg_header(&mpi_coredump->xfi_hss_pll_hdr,
859 XFI_HSS_PLL_SEG_NUM,
860 sizeof(struct mpi_coredump_segment_header) +
861 sizeof(mpi_coredump->serdes_xfi_hss_pll),
862 "XFI HSS PLL Registers");
863
864 ql_build_coredump_seg_header(&mpi_coredump->xaui2_an_hdr,
865 XAUI2_AN_SEG_NUM,
866 sizeof(struct mpi_coredump_segment_header) +
867 sizeof(mpi_coredump->serdes2_xaui_an),
868 "XAUI2 AN Registers");
869
870 ql_build_coredump_seg_header(&mpi_coredump->xaui2_hss_pcs_hdr,
871 XAUI2_HSS_PCS_SEG_NUM,
872 sizeof(struct mpi_coredump_segment_header) +
873 sizeof(mpi_coredump->serdes2_xaui_hss_pcs),
874 "XAUI2 HSS PCS Registers");
875
876 ql_build_coredump_seg_header(&mpi_coredump->xfi2_an_hdr,
877 XFI2_AN_SEG_NUM,
878 sizeof(struct mpi_coredump_segment_header) +
879 sizeof(mpi_coredump->serdes2_xfi_an),
880 "XFI2 AN Registers");
881
882 ql_build_coredump_seg_header(&mpi_coredump->xfi2_train_hdr,
883 XFI2_TRAIN_SEG_NUM,
884 sizeof(struct mpi_coredump_segment_header) +
885 sizeof(mpi_coredump->serdes2_xfi_train),
886 "XFI2 TRAIN Registers");
887
888 ql_build_coredump_seg_header(&mpi_coredump->xfi2_hss_pcs_hdr,
889 XFI2_HSS_PCS_SEG_NUM,
890 sizeof(struct mpi_coredump_segment_header) +
891 sizeof(mpi_coredump->serdes2_xfi_hss_pcs),
892 "XFI2 HSS PCS Registers");
893
894 ql_build_coredump_seg_header(&mpi_coredump->xfi2_hss_tx_hdr,
895 XFI2_HSS_TX_SEG_NUM,
896 sizeof(struct mpi_coredump_segment_header) +
897 sizeof(mpi_coredump->serdes2_xfi_hss_tx),
898 "XFI2 HSS TX Registers");
899
900 ql_build_coredump_seg_header(&mpi_coredump->xfi2_hss_rx_hdr,
901 XFI2_HSS_RX_SEG_NUM,
902 sizeof(struct mpi_coredump_segment_header) +
903 sizeof(mpi_coredump->serdes2_xfi_hss_rx),
904 "XFI2 HSS RX Registers");
905
906 ql_build_coredump_seg_header(&mpi_coredump->xfi2_hss_pll_hdr,
907 XFI2_HSS_PLL_SEG_NUM,
908 sizeof(struct mpi_coredump_segment_header) +
909 sizeof(mpi_coredump->serdes2_xfi_hss_pll),
910 "XFI2 HSS PLL Registers");
911
912 status = ql_get_serdes_regs(qdev, mpi_coredump);
913 if (status) {
914 QPRINTK(qdev, DRV, ERR,
915 "Failed Dump of Serdes Registers. Status = 0x%.08x\n",
916 status);
917 goto err;
918 }
919
920 ql_build_coredump_seg_header(&mpi_coredump->core_regs_seg_hdr,
921 CORE_SEG_NUM,
922 sizeof(mpi_coredump->core_regs_seg_hdr) +
923 sizeof(mpi_coredump->mpi_core_regs) +
924 sizeof(mpi_coredump->mpi_core_sh_regs),
925 "Core Registers");
926
927 /* Get the MPI Core Registers */
928 status = ql_get_mpi_regs(qdev, &mpi_coredump->mpi_core_regs[0],
929 MPI_CORE_REGS_ADDR, MPI_CORE_REGS_CNT);
930 if (status)
931 goto err;
932 /* Get the 16 MPI shadow registers */
933 status = ql_get_mpi_shadow_regs(qdev,
934 &mpi_coredump->mpi_core_sh_regs[0]);
935 if (status)
936 goto err;
937
938 /* Get the Test Logic Registers */
939 ql_build_coredump_seg_header(&mpi_coredump->test_logic_regs_seg_hdr,
940 TEST_LOGIC_SEG_NUM,
941 sizeof(struct mpi_coredump_segment_header)
942 + sizeof(mpi_coredump->test_logic_regs),
943 "Test Logic Regs");
944 status = ql_get_mpi_regs(qdev, &mpi_coredump->test_logic_regs[0],
945 TEST_REGS_ADDR, TEST_REGS_CNT);
946 if (status)
947 goto err;
948
949 /* Get the RMII Registers */
950 ql_build_coredump_seg_header(&mpi_coredump->rmii_regs_seg_hdr,
951 RMII_SEG_NUM,
952 sizeof(struct mpi_coredump_segment_header)
953 + sizeof(mpi_coredump->rmii_regs),
954 "RMII Registers");
955 status = ql_get_mpi_regs(qdev, &mpi_coredump->rmii_regs[0],
956 RMII_REGS_ADDR, RMII_REGS_CNT);
957 if (status)
958 goto err;
959
960 /* Get the FCMAC1 Registers */
961 ql_build_coredump_seg_header(&mpi_coredump->fcmac1_regs_seg_hdr,
962 FCMAC1_SEG_NUM,
963 sizeof(struct mpi_coredump_segment_header)
964 + sizeof(mpi_coredump->fcmac1_regs),
965 "FCMAC1 Registers");
966 status = ql_get_mpi_regs(qdev, &mpi_coredump->fcmac1_regs[0],
967 FCMAC1_REGS_ADDR, FCMAC_REGS_CNT);
968 if (status)
969 goto err;
970
971 /* Get the FCMAC2 Registers */
972
973 ql_build_coredump_seg_header(&mpi_coredump->fcmac2_regs_seg_hdr,
974 FCMAC2_SEG_NUM,
975 sizeof(struct mpi_coredump_segment_header)
976 + sizeof(mpi_coredump->fcmac2_regs),
977 "FCMAC2 Registers");
978
979 status = ql_get_mpi_regs(qdev, &mpi_coredump->fcmac2_regs[0],
980 FCMAC2_REGS_ADDR, FCMAC_REGS_CNT);
981 if (status)
982 goto err;
983
984 /* Get the FC1 MBX Registers */
985 ql_build_coredump_seg_header(&mpi_coredump->fc1_mbx_regs_seg_hdr,
986 FC1_MBOX_SEG_NUM,
987 sizeof(struct mpi_coredump_segment_header)
988 + sizeof(mpi_coredump->fc1_mbx_regs),
989 "FC1 MBox Regs");
990 status = ql_get_mpi_regs(qdev, &mpi_coredump->fc1_mbx_regs[0],
991 FC1_MBX_REGS_ADDR, FC_MBX_REGS_CNT);
992 if (status)
993 goto err;
994
995 /* Get the IDE Registers */
996 ql_build_coredump_seg_header(&mpi_coredump->ide_regs_seg_hdr,
997 IDE_SEG_NUM,
998 sizeof(struct mpi_coredump_segment_header)
999 + sizeof(mpi_coredump->ide_regs),
1000 "IDE Registers");
1001 status = ql_get_mpi_regs(qdev, &mpi_coredump->ide_regs[0],
1002 IDE_REGS_ADDR, IDE_REGS_CNT);
1003 if (status)
1004 goto err;
1005
1006 /* Get the NIC1 MBX Registers */
1007 ql_build_coredump_seg_header(&mpi_coredump->nic1_mbx_regs_seg_hdr,
1008 NIC1_MBOX_SEG_NUM,
1009 sizeof(struct mpi_coredump_segment_header)
1010 + sizeof(mpi_coredump->nic1_mbx_regs),
1011 "NIC1 MBox Regs");
1012 status = ql_get_mpi_regs(qdev, &mpi_coredump->nic1_mbx_regs[0],
1013 NIC1_MBX_REGS_ADDR, NIC_MBX_REGS_CNT);
1014 if (status)
1015 goto err;
1016
1017 /* Get the SMBus Registers */
1018 ql_build_coredump_seg_header(&mpi_coredump->smbus_regs_seg_hdr,
1019 SMBUS_SEG_NUM,
1020 sizeof(struct mpi_coredump_segment_header)
1021 + sizeof(mpi_coredump->smbus_regs),
1022 "SMBus Registers");
1023 status = ql_get_mpi_regs(qdev, &mpi_coredump->smbus_regs[0],
1024 SMBUS_REGS_ADDR, SMBUS_REGS_CNT);
1025 if (status)
1026 goto err;
1027
1028 /* Get the FC2 MBX Registers */
1029 ql_build_coredump_seg_header(&mpi_coredump->fc2_mbx_regs_seg_hdr,
1030 FC2_MBOX_SEG_NUM,
1031 sizeof(struct mpi_coredump_segment_header)
1032 + sizeof(mpi_coredump->fc2_mbx_regs),
1033 "FC2 MBox Regs");
1034 status = ql_get_mpi_regs(qdev, &mpi_coredump->fc2_mbx_regs[0],
1035 FC2_MBX_REGS_ADDR, FC_MBX_REGS_CNT);
1036 if (status)
1037 goto err;
1038
1039 /* Get the NIC2 MBX Registers */
1040 ql_build_coredump_seg_header(&mpi_coredump->nic2_mbx_regs_seg_hdr,
1041 NIC2_MBOX_SEG_NUM,
1042 sizeof(struct mpi_coredump_segment_header)
1043 + sizeof(mpi_coredump->nic2_mbx_regs),
1044 "NIC2 MBox Regs");
1045 status = ql_get_mpi_regs(qdev, &mpi_coredump->nic2_mbx_regs[0],
1046 NIC2_MBX_REGS_ADDR, NIC_MBX_REGS_CNT);
1047 if (status)
1048 goto err;
1049
1050 /* Get the I2C Registers */
1051 ql_build_coredump_seg_header(&mpi_coredump->i2c_regs_seg_hdr,
1052 I2C_SEG_NUM,
1053 sizeof(struct mpi_coredump_segment_header)
1054 + sizeof(mpi_coredump->i2c_regs),
1055 "I2C Registers");
1056 status = ql_get_mpi_regs(qdev, &mpi_coredump->i2c_regs[0],
1057 I2C_REGS_ADDR, I2C_REGS_CNT);
1058 if (status)
1059 goto err;
1060
1061 /* Get the MEMC Registers */
1062 ql_build_coredump_seg_header(&mpi_coredump->memc_regs_seg_hdr,
1063 MEMC_SEG_NUM,
1064 sizeof(struct mpi_coredump_segment_header)
1065 + sizeof(mpi_coredump->memc_regs),
1066 "MEMC Registers");
1067 status = ql_get_mpi_regs(qdev, &mpi_coredump->memc_regs[0],
1068 MEMC_REGS_ADDR, MEMC_REGS_CNT);
1069 if (status)
1070 goto err;
1071
1072 /* Get the PBus Registers */
1073 ql_build_coredump_seg_header(&mpi_coredump->pbus_regs_seg_hdr,
1074 PBUS_SEG_NUM,
1075 sizeof(struct mpi_coredump_segment_header)
1076 + sizeof(mpi_coredump->pbus_regs),
1077 "PBUS Registers");
1078 status = ql_get_mpi_regs(qdev, &mpi_coredump->pbus_regs[0],
1079 PBUS_REGS_ADDR, PBUS_REGS_CNT);
1080 if (status)
1081 goto err;
1082
1083 /* Get the MDE Registers */
1084 ql_build_coredump_seg_header(&mpi_coredump->mde_regs_seg_hdr,
1085 MDE_SEG_NUM,
1086 sizeof(struct mpi_coredump_segment_header)
1087 + sizeof(mpi_coredump->mde_regs),
1088 "MDE Registers");
1089 status = ql_get_mpi_regs(qdev, &mpi_coredump->mde_regs[0],
1090 MDE_REGS_ADDR, MDE_REGS_CNT);
1091 if (status)
1092 goto err;
1093
1094 ql_build_coredump_seg_header(&mpi_coredump->misc_nic_seg_hdr,
1095 MISC_NIC_INFO_SEG_NUM,
1096 sizeof(struct mpi_coredump_segment_header)
1097 + sizeof(mpi_coredump->misc_nic_info),
1098 "MISC NIC INFO");
1099 mpi_coredump->misc_nic_info.rx_ring_count = qdev->rx_ring_count;
1100 mpi_coredump->misc_nic_info.tx_ring_count = qdev->tx_ring_count;
1101 mpi_coredump->misc_nic_info.intr_count = qdev->intr_count;
1102 mpi_coredump->misc_nic_info.function = qdev->func;
1103
1104 /* Segment 31 */
1105 /* Get indexed register values. */
1106 ql_build_coredump_seg_header(&mpi_coredump->intr_states_seg_hdr,
1107 INTR_STATES_SEG_NUM,
1108 sizeof(struct mpi_coredump_segment_header)
1109 + sizeof(mpi_coredump->intr_states),
1110 "INTR States");
1111 ql_get_intr_states(qdev, &mpi_coredump->intr_states[0]);
1112
1113 ql_build_coredump_seg_header(&mpi_coredump->cam_entries_seg_hdr,
1114 CAM_ENTRIES_SEG_NUM,
1115 sizeof(struct mpi_coredump_segment_header)
1116 + sizeof(mpi_coredump->cam_entries),
1117 "CAM Entries");
1118 status = ql_get_cam_entries(qdev, &mpi_coredump->cam_entries[0]);
1119 if (status)
1120 goto err;
1121
1122 ql_build_coredump_seg_header(&mpi_coredump->nic_routing_words_seg_hdr,
1123 ROUTING_WORDS_SEG_NUM,
1124 sizeof(struct mpi_coredump_segment_header)
1125 + sizeof(mpi_coredump->nic_routing_words),
1126 "Routing Words");
1127 status = ql_get_routing_entries(qdev,
1128 &mpi_coredump->nic_routing_words[0]);
1129 if (status)
1130 goto err;
1131
1132 /* Segment 34 (Rev C. step 23) */
1133 ql_build_coredump_seg_header(&mpi_coredump->ets_seg_hdr,
1134 ETS_SEG_NUM,
1135 sizeof(struct mpi_coredump_segment_header)
1136 + sizeof(mpi_coredump->ets),
1137 "ETS Registers");
1138 status = ql_get_ets_regs(qdev, &mpi_coredump->ets[0]);
1139 if (status)
1140 goto err;
1141
1142 ql_build_coredump_seg_header(&mpi_coredump->probe_dump_seg_hdr,
1143 PROBE_DUMP_SEG_NUM,
1144 sizeof(struct mpi_coredump_segment_header)
1145 + sizeof(mpi_coredump->probe_dump),
1146 "Probe Dump");
1147 ql_get_probe_dump(qdev, &mpi_coredump->probe_dump[0]);
1148
1149 ql_build_coredump_seg_header(&mpi_coredump->routing_reg_seg_hdr,
1150 ROUTING_INDEX_SEG_NUM,
1151 sizeof(struct mpi_coredump_segment_header)
1152 + sizeof(mpi_coredump->routing_regs),
1153 "Routing Regs");
1154 status = ql_get_routing_index_registers(qdev,
1155 &mpi_coredump->routing_regs[0]);
1156 if (status)
1157 goto err;
1158
1159 ql_build_coredump_seg_header(&mpi_coredump->mac_prot_reg_seg_hdr,
1160 MAC_PROTOCOL_SEG_NUM,
1161 sizeof(struct mpi_coredump_segment_header)
1162 + sizeof(mpi_coredump->mac_prot_regs),
1163 "MAC Prot Regs");
1164 ql_get_mac_protocol_registers(qdev, &mpi_coredump->mac_prot_regs[0]);
1165
1166 /* Get the semaphore registers for all 5 functions */
1167 ql_build_coredump_seg_header(&mpi_coredump->sem_regs_seg_hdr,
1168 SEM_REGS_SEG_NUM,
1169 sizeof(struct mpi_coredump_segment_header) +
1170 sizeof(mpi_coredump->sem_regs), "Sem Registers");
1171
1172 ql_get_sem_registers(qdev, &mpi_coredump->sem_regs[0]);
1173
1174 /* Prevent the mpi restarting while we dump the memory.*/
1175 ql_write_mpi_reg(qdev, MPI_TEST_FUNC_RST_STS, MPI_TEST_FUNC_RST_FRC);
1176
1177 /* clear the pause */
1178 status = ql_unpause_mpi_risc(qdev);
1179 if (status) {
1180 QPRINTK(qdev, DRV, ERR,
1181 "Failed RISC unpause. Status = 0x%.08x\n", status);
1182 goto err;
1183 }
1184
1185 /* Reset the RISC so we can dump RAM */
1186 status = ql_hard_reset_mpi_risc(qdev);
1187 if (status) {
1188 QPRINTK(qdev, DRV, ERR,
1189 "Failed RISC reset. Status = 0x%.08x\n", status);
1190 goto err;
1191 }
1192
1193 ql_build_coredump_seg_header(&mpi_coredump->code_ram_seg_hdr,
1194 WCS_RAM_SEG_NUM,
1195 sizeof(struct mpi_coredump_segment_header)
1196 + sizeof(mpi_coredump->code_ram),
1197 "WCS RAM");
1198 status = ql_dump_risc_ram_area(qdev, &mpi_coredump->code_ram[0],
1199 CODE_RAM_ADDR, CODE_RAM_CNT);
1200 if (status) {
1201 QPRINTK(qdev, DRV, ERR,
1202 "Failed Dump of CODE RAM. Status = 0x%.08x\n", status);
1203 goto err;
1204 }
1205
1206 /* Insert the segment header */
1207 ql_build_coredump_seg_header(&mpi_coredump->memc_ram_seg_hdr,
1208 MEMC_RAM_SEG_NUM,
1209 sizeof(struct mpi_coredump_segment_header)
1210 + sizeof(mpi_coredump->memc_ram),
1211 "MEMC RAM");
1212 status = ql_dump_risc_ram_area(qdev, &mpi_coredump->memc_ram[0],
1213 MEMC_RAM_ADDR, MEMC_RAM_CNT);
1214 if (status) {
1215 QPRINTK(qdev, DRV, ERR,
1216 "Failed Dump of MEMC RAM. Status = 0x%.08x\n", status);
1217 goto err;
1218 }
1219err:
1220 ql_sem_unlock(qdev, SEM_PROC_REG_MASK); /* does flush too */
1221 return status;
1222
1223}
1224
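A minimal sketch of how the pieces added by this patch fit together (the wrapper name example_fw_dump is hypothetical; the calls and the 5 * HZ delay mirror ql_init_device() and ql_mpi_reset_work() further down):

/* Illustrative sketch only, not part of the patch. */
static void example_fw_dump(struct ql_adapter *qdev)
{
        /* Buffer is vmalloc'ed once at probe time when the
         * qlge_mpi_coredump module parameter is set.
         */
        if (!qdev->mpi_coredump)
                return;

        /* Only the pcie function that owns the firmware dumps it. */
        if (!ql_own_firmware(qdev))
                return;

        /* Pauses the RISC, fills every segment, then resets the RISC. */
        if (ql_core_dump(qdev, qdev->mpi_coredump))
                return;

        /* Hand the captured image to the dump-to-log worker. */
        queue_delayed_work(qdev->workqueue, &qdev->mpi_core_to_log, 5 * HZ);
}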
1225static void ql_get_core_dump(struct ql_adapter *qdev)
1226{
1227 if (!ql_own_firmware(qdev)) {
1228 QPRINTK(qdev, DRV, ERR, "%s: Don't own firmware!\n",
1229 qdev->ndev->name);
1230 return;
1231 }
1232
1233 if (!netif_running(qdev->ndev)) {
1234 QPRINTK(qdev, IFUP, ERR,
1235 "Force Coredump can only be done from interface "
1236 "that is up.\n");
1237 return;
1238 }
1239
1240 if (ql_mb_sys_err(qdev)) {
1241 QPRINTK(qdev, IFUP, ERR,
1242 "Fail force coredump with ql_mb_sys_err().\n");
1243 return;
1244 }
1245}
1246
106void ql_gen_reg_dump(struct ql_adapter *qdev, 1247void ql_gen_reg_dump(struct ql_adapter *qdev,
107 struct ql_reg_dump *mpi_coredump) 1248 struct ql_reg_dump *mpi_coredump)
108{ 1249{
@@ -178,6 +1319,36 @@ void ql_gen_reg_dump(struct ql_adapter *qdev,
178 status = ql_get_ets_regs(qdev, &mpi_coredump->ets[0]); 1319 status = ql_get_ets_regs(qdev, &mpi_coredump->ets[0]);
179 if (status) 1320 if (status)
180 return; 1321 return;
1322
1323 if (test_bit(QL_FRC_COREDUMP, &qdev->flags))
1324 ql_get_core_dump(qdev);
1325}
1326
1327/* Coredump to messages log file using separate worker thread */
1328void ql_mpi_core_to_log(struct work_struct *work)
1329{
1330 struct ql_adapter *qdev =
1331 container_of(work, struct ql_adapter, mpi_core_to_log.work);
1332 u32 *tmp, count;
1333 int i;
1334
1335 count = sizeof(struct ql_mpi_coredump) / sizeof(u32);
1336 tmp = (u32 *)qdev->mpi_coredump;
1337 QPRINTK(qdev, DRV, DEBUG, "Core is dumping to log file!\n");
1338
1339 for (i = 0; i < count; i += 8) {
1340 printk(KERN_ERR "%.08x: %.08x %.08x %.08x %.08x %.08x "
1341 "%.08x %.08x %.08x \n", i,
1342 tmp[i + 0],
1343 tmp[i + 1],
1344 tmp[i + 2],
1345 tmp[i + 3],
1346 tmp[i + 4],
1347 tmp[i + 5],
1348 tmp[i + 6],
1349 tmp[i + 7]);
1350 msleep(5);
1351 }
181} 1352}
182 1353
183#ifdef QL_REG_DUMP 1354#ifdef QL_REG_DUMP
diff --git a/drivers/net/qlge/qlge_main.c b/drivers/net/qlge/qlge_main.c
index 894a7c84fae..5be3ae2f5a1 100644
--- a/drivers/net/qlge/qlge_main.c
+++ b/drivers/net/qlge/qlge_main.c
@@ -73,7 +73,19 @@ static int qlge_irq_type = MSIX_IRQ;
73module_param(qlge_irq_type, int, MSIX_IRQ); 73module_param(qlge_irq_type, int, MSIX_IRQ);
74MODULE_PARM_DESC(qlge_irq_type, "0 = MSI-X, 1 = MSI, 2 = Legacy."); 74MODULE_PARM_DESC(qlge_irq_type, "0 = MSI-X, 1 = MSI, 2 = Legacy.");
75 75
76static struct pci_device_id qlge_pci_tbl[] __devinitdata = { 76static int qlge_mpi_coredump;
77module_param(qlge_mpi_coredump, int, 0);
78MODULE_PARM_DESC(qlge_mpi_coredump,
79 "Option to enable MPI firmware dump. "
80 "Default is OFF - Do Not allocate memory. ");
81
82static int qlge_force_coredump;
83module_param(qlge_force_coredump, int, 0);
84MODULE_PARM_DESC(qlge_force_coredump,
85 "Option to allow force of firmware core dump. "
86 "Default is OFF - Do not allow.");
87
88static DEFINE_PCI_DEVICE_TABLE(qlge_pci_tbl) = {
77 {PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID_8012)}, 89 {PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID_8012)},
78 {PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID_8000)}, 90 {PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID_8000)},
79 /* required last entry */ 91 /* required last entry */
@@ -452,9 +464,7 @@ static int ql_set_mac_addr(struct ql_adapter *qdev, int set)
452 if (set) { 464 if (set) {
453 addr = &qdev->ndev->dev_addr[0]; 465 addr = &qdev->ndev->dev_addr[0];
454 QPRINTK(qdev, IFUP, DEBUG, 466 QPRINTK(qdev, IFUP, DEBUG,
455 "Set Mac addr %02x:%02x:%02x:%02x:%02x:%02x\n", 467 "Set Mac addr %pM\n", addr);
456 addr[0], addr[1], addr[2], addr[3],
457 addr[4], addr[5]);
458 } else { 468 } else {
459 memset(zero_mac_addr, 0, ETH_ALEN); 469 memset(zero_mac_addr, 0, ETH_ALEN);
460 addr = &zero_mac_addr[0]; 470 addr = &zero_mac_addr[0];
@@ -1433,6 +1443,254 @@ map_error:
1433 return NETDEV_TX_BUSY; 1443 return NETDEV_TX_BUSY;
1434} 1444}
1435 1445
1446/* Process an inbound completion from an rx ring. */
1447static void ql_process_mac_rx_gro_page(struct ql_adapter *qdev,
1448 struct rx_ring *rx_ring,
1449 struct ib_mac_iocb_rsp *ib_mac_rsp,
1450 u32 length,
1451 u16 vlan_id)
1452{
1453 struct sk_buff *skb;
1454 struct bq_desc *lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
1455 struct skb_frag_struct *rx_frag;
1456 int nr_frags;
1457 struct napi_struct *napi = &rx_ring->napi;
1458
1459 napi->dev = qdev->ndev;
1460
1461 skb = napi_get_frags(napi);
1462 if (!skb) {
1463 QPRINTK(qdev, DRV, ERR, "Couldn't get an skb, exiting.\n");
1464 rx_ring->rx_dropped++;
1465 put_page(lbq_desc->p.pg_chunk.page);
1466 return;
1467 }
1468 prefetch(lbq_desc->p.pg_chunk.va);
1469 rx_frag = skb_shinfo(skb)->frags;
1470 nr_frags = skb_shinfo(skb)->nr_frags;
1471 rx_frag += nr_frags;
1472 rx_frag->page = lbq_desc->p.pg_chunk.page;
1473 rx_frag->page_offset = lbq_desc->p.pg_chunk.offset;
1474 rx_frag->size = length;
1475
1476 skb->len += length;
1477 skb->data_len += length;
1478 skb->truesize += length;
1479 skb_shinfo(skb)->nr_frags++;
1480
1481 rx_ring->rx_packets++;
1482 rx_ring->rx_bytes += length;
1483 skb->ip_summed = CHECKSUM_UNNECESSARY;
1484 skb_record_rx_queue(skb, rx_ring->cq_id);
1485 if (qdev->vlgrp && (vlan_id != 0xffff))
1486 vlan_gro_frags(&rx_ring->napi, qdev->vlgrp, vlan_id);
1487 else
1488 napi_gro_frags(napi);
1489}
1490
1491/* Process an inbound completion from an rx ring. */
1492static void ql_process_mac_rx_page(struct ql_adapter *qdev,
1493 struct rx_ring *rx_ring,
1494 struct ib_mac_iocb_rsp *ib_mac_rsp,
1495 u32 length,
1496 u16 vlan_id)
1497{
1498 struct net_device *ndev = qdev->ndev;
1499 struct sk_buff *skb = NULL;
1500 void *addr;
1501 struct bq_desc *lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
1502 struct napi_struct *napi = &rx_ring->napi;
1503
1504 skb = netdev_alloc_skb(ndev, length);
1505 if (!skb) {
1506 QPRINTK(qdev, DRV, ERR, "Couldn't get an skb, "
1507 "need to unwind!.\n");
1508 rx_ring->rx_dropped++;
1509 put_page(lbq_desc->p.pg_chunk.page);
1510 return;
1511 }
1512
1513 addr = lbq_desc->p.pg_chunk.va;
1514 prefetch(addr);
1515
1516
1517 /* Frame error, so drop the packet. */
1518 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
1519 QPRINTK(qdev, DRV, ERR, "Receive error, flags2 = 0x%x\n",
1520 ib_mac_rsp->flags2);
1521 rx_ring->rx_errors++;
1522 goto err_out;
1523 }
1524
1525 /* The max framesize filter on this chip is set higher than
1526 * MTU since FCoE uses 2k frames.
1527 */
1528 if (skb->len > ndev->mtu + ETH_HLEN) {
1529 QPRINTK(qdev, DRV, ERR, "Segment too small, dropping.\n");
1530 rx_ring->rx_dropped++;
1531 goto err_out;
1532 }
1533 memcpy(skb_put(skb, ETH_HLEN), addr, ETH_HLEN);
1534 QPRINTK(qdev, RX_STATUS, DEBUG,
1535 "%d bytes of headers and data in large. Chain "
1536 "page to new skb and pull tail.\n", length);
1537 skb_fill_page_desc(skb, 0, lbq_desc->p.pg_chunk.page,
1538 lbq_desc->p.pg_chunk.offset+ETH_HLEN,
1539 length-ETH_HLEN);
1540 skb->len += length-ETH_HLEN;
1541 skb->data_len += length-ETH_HLEN;
1542 skb->truesize += length-ETH_HLEN;
1543
1544 rx_ring->rx_packets++;
1545 rx_ring->rx_bytes += skb->len;
1546 skb->protocol = eth_type_trans(skb, ndev);
1547 skb->ip_summed = CHECKSUM_NONE;
1548
1549 if (qdev->rx_csum &&
1550 !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
1551 /* TCP frame. */
1552 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
1553 QPRINTK(qdev, RX_STATUS, DEBUG,
1554 "TCP checksum done!\n");
1555 skb->ip_summed = CHECKSUM_UNNECESSARY;
1556 } else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
1557 (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
1558 /* Unfragmented ipv4 UDP frame. */
1559 struct iphdr *iph = (struct iphdr *) skb->data;
1560 if (!(iph->frag_off &
1561 cpu_to_be16(IP_MF|IP_OFFSET))) {
1562 skb->ip_summed = CHECKSUM_UNNECESSARY;
1563 QPRINTK(qdev, RX_STATUS, DEBUG,
1564 "TCP checksum done!\n");
1565 }
1566 }
1567 }
1568
1569 skb_record_rx_queue(skb, rx_ring->cq_id);
1570 if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
1571 if (qdev->vlgrp && (vlan_id != 0xffff))
1572 vlan_gro_receive(napi, qdev->vlgrp, vlan_id, skb);
1573 else
1574 napi_gro_receive(napi, skb);
1575 } else {
1576 if (qdev->vlgrp && (vlan_id != 0xffff))
1577 vlan_hwaccel_receive_skb(skb, qdev->vlgrp, vlan_id);
1578 else
1579 netif_receive_skb(skb);
1580 }
1581 return;
1582err_out:
1583 dev_kfree_skb_any(skb);
1584 put_page(lbq_desc->p.pg_chunk.page);
1585}
1586
1587/* Process an inbound completion from an rx ring. */
1588static void ql_process_mac_rx_skb(struct ql_adapter *qdev,
1589 struct rx_ring *rx_ring,
1590 struct ib_mac_iocb_rsp *ib_mac_rsp,
1591 u32 length,
1592 u16 vlan_id)
1593{
1594 struct net_device *ndev = qdev->ndev;
1595 struct sk_buff *skb = NULL;
1596 struct sk_buff *new_skb = NULL;
1597 struct bq_desc *sbq_desc = ql_get_curr_sbuf(rx_ring);
1598
1599 skb = sbq_desc->p.skb;
1600 /* Allocate new_skb and copy */
1601 new_skb = netdev_alloc_skb(qdev->ndev, length + NET_IP_ALIGN);
1602 if (new_skb == NULL) {
1603 QPRINTK(qdev, PROBE, ERR,
1604 "No skb available, drop the packet.\n");
1605 rx_ring->rx_dropped++;
1606 return;
1607 }
1608 skb_reserve(new_skb, NET_IP_ALIGN);
1609 memcpy(skb_put(new_skb, length), skb->data, length);
1610 skb = new_skb;
1611
1612 /* Frame error, so drop the packet. */
1613 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
1614 QPRINTK(qdev, DRV, ERR, "Receive error, flags2 = 0x%x\n",
1615 ib_mac_rsp->flags2);
1616 dev_kfree_skb_any(skb);
1617 rx_ring->rx_errors++;
1618 return;
1619 }
1620
1621 /* loopback self test for ethtool */
1622 if (test_bit(QL_SELFTEST, &qdev->flags)) {
1623 ql_check_lb_frame(qdev, skb);
1624 dev_kfree_skb_any(skb);
1625 return;
1626 }
1627
1628 /* The max framesize filter on this chip is set higher than
1629 * MTU since FCoE uses 2k frames.
1630 */
1631 if (skb->len > ndev->mtu + ETH_HLEN) {
1632 dev_kfree_skb_any(skb);
1633 rx_ring->rx_dropped++;
1634 return;
1635 }
1636
1637 prefetch(skb->data);
1638 skb->dev = ndev;
1639 if (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) {
1640 QPRINTK(qdev, RX_STATUS, DEBUG, "%s%s%s Multicast.\n",
1641 (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1642 IB_MAC_IOCB_RSP_M_HASH ? "Hash" : "",
1643 (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1644 IB_MAC_IOCB_RSP_M_REG ? "Registered" : "",
1645 (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1646 IB_MAC_IOCB_RSP_M_PROM ? "Promiscuous" : "");
1647 }
1648 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_P)
1649 QPRINTK(qdev, RX_STATUS, DEBUG, "Promiscuous Packet.\n");
1650
1651 rx_ring->rx_packets++;
1652 rx_ring->rx_bytes += skb->len;
1653 skb->protocol = eth_type_trans(skb, ndev);
1654 skb->ip_summed = CHECKSUM_NONE;
1655
1656 /* If rx checksum is on, and there are no
1657 * csum or frame errors.
1658 */
1659 if (qdev->rx_csum &&
1660 !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
1661 /* TCP frame. */
1662 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
1663 QPRINTK(qdev, RX_STATUS, DEBUG,
1664 "TCP checksum done!\n");
1665 skb->ip_summed = CHECKSUM_UNNECESSARY;
1666 } else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
1667 (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
1668 /* Unfragmented ipv4 UDP frame. */
1669 struct iphdr *iph = (struct iphdr *) skb->data;
1670 if (!(iph->frag_off &
1671 cpu_to_be16(IP_MF|IP_OFFSET))) {
1672 skb->ip_summed = CHECKSUM_UNNECESSARY;
1673 QPRINTK(qdev, RX_STATUS, DEBUG,
1674 "TCP checksum done!\n");
1675 }
1676 }
1677 }
1678
1679 skb_record_rx_queue(skb, rx_ring->cq_id);
1680 if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
1681 if (qdev->vlgrp && (vlan_id != 0xffff))
1682 vlan_gro_receive(&rx_ring->napi, qdev->vlgrp,
1683 vlan_id, skb);
1684 else
1685 napi_gro_receive(&rx_ring->napi, skb);
1686 } else {
1687 if (qdev->vlgrp && (vlan_id != 0xffff))
1688 vlan_hwaccel_receive_skb(skb, qdev->vlgrp, vlan_id);
1689 else
1690 netif_receive_skb(skb);
1691 }
1692}
1693
1436static void ql_realign_skb(struct sk_buff *skb, int len) 1694static void ql_realign_skb(struct sk_buff *skb, int len)
1437{ 1695{
1438 void *temp_addr = skb->data; 1696 void *temp_addr = skb->data;
@@ -1646,14 +1904,13 @@ static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
1646} 1904}
1647 1905
1648/* Process an inbound completion from an rx ring. */ 1906/* Process an inbound completion from an rx ring. */
1649static void ql_process_mac_rx_intr(struct ql_adapter *qdev, 1907static void ql_process_mac_split_rx_intr(struct ql_adapter *qdev,
1650 struct rx_ring *rx_ring, 1908 struct rx_ring *rx_ring,
1651 struct ib_mac_iocb_rsp *ib_mac_rsp) 1909 struct ib_mac_iocb_rsp *ib_mac_rsp,
1910 u16 vlan_id)
1652{ 1911{
1653 struct net_device *ndev = qdev->ndev; 1912 struct net_device *ndev = qdev->ndev;
1654 struct sk_buff *skb = NULL; 1913 struct sk_buff *skb = NULL;
1655 u16 vlan_id = (le16_to_cpu(ib_mac_rsp->vlan_id) &
1656 IB_MAC_IOCB_RSP_VLAN_MASK)
1657 1914
1658 QL_DUMP_IB_MAC_RSP(ib_mac_rsp); 1915 QL_DUMP_IB_MAC_RSP(ib_mac_rsp);
1659 1916
@@ -1753,6 +2010,65 @@ static void ql_process_mac_rx_intr(struct ql_adapter *qdev,
1753 } 2010 }
1754} 2011}
1755 2012
2013/* Process an inbound completion from an rx ring. */
2014static unsigned long ql_process_mac_rx_intr(struct ql_adapter *qdev,
2015 struct rx_ring *rx_ring,
2016 struct ib_mac_iocb_rsp *ib_mac_rsp)
2017{
2018 u32 length = le32_to_cpu(ib_mac_rsp->data_len);
2019 u16 vlan_id = (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ?
2020 ((le16_to_cpu(ib_mac_rsp->vlan_id) &
2021 IB_MAC_IOCB_RSP_VLAN_MASK)) : 0xffff;
2022
2023 QL_DUMP_IB_MAC_RSP(ib_mac_rsp);
2024
2025 if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV) {
2026 /* The data and headers are split into
2027 * separate buffers.
2028 */
2029 ql_process_mac_split_rx_intr(qdev, rx_ring, ib_mac_rsp,
2030 vlan_id);
2031 } else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DS) {
2032 /* The data fit in a single small buffer.
2033 * Allocate a new skb, copy the data and
2034 * return the buffer to the free pool.
2035 */
2036 ql_process_mac_rx_skb(qdev, rx_ring, ib_mac_rsp,
2037 length, vlan_id);
2038 } else if ((ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) &&
2039 !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK) &&
2040 (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T)) {
2041 /* TCP packet in a page chunk that's been checksummed.
2042 * Tack it on to our GRO skb and let it go.
2043 */
2044 ql_process_mac_rx_gro_page(qdev, rx_ring, ib_mac_rsp,
2045 length, vlan_id);
2046 } else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) {
2047 /* Non-TCP packet in a page chunk. Allocate an
2048 * skb, tack it on frags, and send it up.
2049 */
2050 ql_process_mac_rx_page(qdev, rx_ring, ib_mac_rsp,
2051 length, vlan_id);
2052 } else {
2053 struct bq_desc *lbq_desc;
2054
2055 /* Free small buffer that holds the IAL */
2056 lbq_desc = ql_get_curr_sbuf(rx_ring);
2057 QPRINTK(qdev, RX_ERR, ERR, "Dropping frame, len %d > mtu %d\n",
2058 length, qdev->ndev->mtu);
2059
2060 /* Unwind the large buffers for this frame. */
2061 while (length > 0) {
2062 lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
2063 length -= (length < rx_ring->lbq_buf_size) ?
2064 length : rx_ring->lbq_buf_size;
2065 put_page(lbq_desc->p.pg_chunk.page);
2066 }
2067 }
2068
2069 return (unsigned long)length;
2070}
2071
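The demux above can be restated as a pure classifier, which may help when reading the four handlers; the enum and helper names below are made up, while the flag tests are exactly the ones in ql_process_mac_rx_intr():

/* Illustrative restatement of the dispatch rule above. */
enum rx_path { RX_SPLIT_HDR, RX_SMALL_BUF, RX_GRO_PAGE, RX_PAGE, RX_UNWIND };

static enum rx_path classify_rx_completion(struct ib_mac_iocb_rsp *rsp)
{
        if (rsp->flags4 & IB_MAC_IOCB_RSP_HV)
                return RX_SPLIT_HDR;    /* header/data split across buffers */
        if (rsp->flags3 & IB_MAC_IOCB_RSP_DS)
                return RX_SMALL_BUF;    /* whole frame fit in a small buffer */
        if ((rsp->flags3 & IB_MAC_IOCB_RSP_DL) &&
            !(rsp->flags1 & IB_MAC_CSUM_ERR_MASK) &&
            (rsp->flags2 & IB_MAC_IOCB_RSP_T))
                return RX_GRO_PAGE;     /* checksummed TCP in a page chunk */
        if (rsp->flags3 & IB_MAC_IOCB_RSP_DL)
                return RX_PAGE;         /* other traffic in a page chunk */
        return RX_UNWIND;               /* oversized frame: drop and unwind */
}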
1756/* Process an outbound completion from an rx ring. */ 2072/* Process an outbound completion from an rx ring. */
1757static void ql_process_mac_tx_intr(struct ql_adapter *qdev, 2073static void ql_process_mac_tx_intr(struct ql_adapter *qdev,
1758 struct ob_mac_iocb_rsp *mac_rsp) 2074 struct ob_mac_iocb_rsp *mac_rsp)
@@ -3332,15 +3648,15 @@ static int ql_adapter_initialize(struct ql_adapter *qdev)
3332 3648
3333 /* Enable the function, set pagesize, enable error checking. */ 3649 /* Enable the function, set pagesize, enable error checking. */
3334 value = FSC_FE | FSC_EPC_INBOUND | FSC_EPC_OUTBOUND | 3650 value = FSC_FE | FSC_EPC_INBOUND | FSC_EPC_OUTBOUND |
3335 FSC_EC | FSC_VM_PAGE_4K | FSC_SH; 3651 FSC_EC | FSC_VM_PAGE_4K;
3652 value |= SPLT_SETTING;
3336 3653
3337 /* Set/clear header splitting. */ 3654 /* Set/clear header splitting. */
3338 mask = FSC_VM_PAGESIZE_MASK | 3655 mask = FSC_VM_PAGESIZE_MASK |
3339 FSC_DBL_MASK | FSC_DBRST_MASK | (value << 16); 3656 FSC_DBL_MASK | FSC_DBRST_MASK | (value << 16);
3340 ql_write32(qdev, FSC, mask | value); 3657 ql_write32(qdev, FSC, mask | value);
3341 3658
3342 ql_write32(qdev, SPLT_HDR, SPLT_HDR_EP | 3659 ql_write32(qdev, SPLT_HDR, SPLT_LEN);
3343 min(SMALL_BUF_MAP_SIZE, MAX_SPLIT_SIZE));
3344 3660
3345 /* Set RX packet routing to use port/pci function on which the 3661 /* Set RX packet routing to use port/pci function on which the
3346 * packet arrived on in addition to usual frame routing. 3662 * packet arrived on in addition to usual frame routing.
@@ -3538,6 +3854,7 @@ static int ql_adapter_down(struct ql_adapter *qdev)
3538 cancel_delayed_work_sync(&qdev->mpi_reset_work); 3854 cancel_delayed_work_sync(&qdev->mpi_reset_work);
3539 cancel_delayed_work_sync(&qdev->mpi_work); 3855 cancel_delayed_work_sync(&qdev->mpi_work);
3540 cancel_delayed_work_sync(&qdev->mpi_idc_work); 3856 cancel_delayed_work_sync(&qdev->mpi_idc_work);
3857 cancel_delayed_work_sync(&qdev->mpi_core_to_log);
3541 cancel_delayed_work_sync(&qdev->mpi_port_cfg_work); 3858 cancel_delayed_work_sync(&qdev->mpi_port_cfg_work);
3542 3859
3543 for (i = 0; i < qdev->rss_ring_count; i++) 3860 for (i = 0; i < qdev->rss_ring_count; i++)
@@ -4094,6 +4411,7 @@ static void ql_release_all(struct pci_dev *pdev)
4094 iounmap(qdev->reg_base); 4411 iounmap(qdev->reg_base);
4095 if (qdev->doorbell_area) 4412 if (qdev->doorbell_area)
4096 iounmap(qdev->doorbell_area); 4413 iounmap(qdev->doorbell_area);
4414 vfree(qdev->mpi_coredump);
4097 pci_release_regions(pdev); 4415 pci_release_regions(pdev);
4098 pci_set_drvdata(pdev, NULL); 4416 pci_set_drvdata(pdev, NULL);
4099} 4417}
@@ -4175,6 +4493,17 @@ static int __devinit ql_init_device(struct pci_dev *pdev,
4175 spin_lock_init(&qdev->hw_lock); 4493 spin_lock_init(&qdev->hw_lock);
4176 spin_lock_init(&qdev->stats_lock); 4494 spin_lock_init(&qdev->stats_lock);
4177 4495
4496 if (qlge_mpi_coredump) {
4497 qdev->mpi_coredump =
4498 vmalloc(sizeof(struct ql_mpi_coredump));
4499 if (qdev->mpi_coredump == NULL) {
4500 dev_err(&pdev->dev, "Coredump alloc failed.\n");
4501 err = -ENOMEM;
4502 goto err_out2;
4503 }
4504 if (qlge_force_coredump)
4505 set_bit(QL_FRC_COREDUMP, &qdev->flags);
4506 }
4178 /* make sure the EEPROM is good */ 4507 /* make sure the EEPROM is good */
4179 err = qdev->nic_ops->get_flash(qdev); 4508 err = qdev->nic_ops->get_flash(qdev);
4180 if (err) { 4509 if (err) {
@@ -4204,6 +4533,7 @@ static int __devinit ql_init_device(struct pci_dev *pdev,
4204 INIT_DELAYED_WORK(&qdev->mpi_work, ql_mpi_work); 4533 INIT_DELAYED_WORK(&qdev->mpi_work, ql_mpi_work);
4205 INIT_DELAYED_WORK(&qdev->mpi_port_cfg_work, ql_mpi_port_cfg_work); 4534 INIT_DELAYED_WORK(&qdev->mpi_port_cfg_work, ql_mpi_port_cfg_work);
4206 INIT_DELAYED_WORK(&qdev->mpi_idc_work, ql_mpi_idc_work); 4535 INIT_DELAYED_WORK(&qdev->mpi_idc_work, ql_mpi_idc_work);
4536 INIT_DELAYED_WORK(&qdev->mpi_core_to_log, ql_mpi_core_to_log);
4207 init_completion(&qdev->ide_completion); 4537 init_completion(&qdev->ide_completion);
4208 4538
4209 if (!cards_found) { 4539 if (!cards_found) {
@@ -4327,6 +4657,7 @@ static void ql_eeh_close(struct net_device *ndev)
4327 cancel_delayed_work_sync(&qdev->mpi_reset_work); 4657 cancel_delayed_work_sync(&qdev->mpi_reset_work);
4328 cancel_delayed_work_sync(&qdev->mpi_work); 4658 cancel_delayed_work_sync(&qdev->mpi_work);
4329 cancel_delayed_work_sync(&qdev->mpi_idc_work); 4659 cancel_delayed_work_sync(&qdev->mpi_idc_work);
4660 cancel_delayed_work_sync(&qdev->mpi_core_to_log);
4330 cancel_delayed_work_sync(&qdev->mpi_port_cfg_work); 4661 cancel_delayed_work_sync(&qdev->mpi_port_cfg_work);
4331 4662
4332 for (i = 0; i < qdev->rss_ring_count; i++) 4663 for (i = 0; i < qdev->rss_ring_count; i++)
diff --git a/drivers/net/qlge/qlge_mpi.c b/drivers/net/qlge/qlge_mpi.c
index e2b2286102d..e2c846f17fc 100644
--- a/drivers/net/qlge/qlge_mpi.c
+++ b/drivers/net/qlge/qlge_mpi.c
@@ -1,5 +1,54 @@
1#include "qlge.h" 1#include "qlge.h"
2 2
3int ql_unpause_mpi_risc(struct ql_adapter *qdev)
4{
5 u32 tmp;
6
7 /* Un-pause the RISC */
8 tmp = ql_read32(qdev, CSR);
9 if (!(tmp & CSR_RP))
10 return -EIO;
11
12 ql_write32(qdev, CSR, CSR_CMD_CLR_PAUSE);
13 return 0;
14}
15
16int ql_pause_mpi_risc(struct ql_adapter *qdev)
17{
18 u32 tmp;
19 int count = UDELAY_COUNT;
20
21 /* Pause the RISC */
22 ql_write32(qdev, CSR, CSR_CMD_SET_PAUSE);
23 do {
24 tmp = ql_read32(qdev, CSR);
25 if (tmp & CSR_RP)
26 break;
27 mdelay(UDELAY_DELAY);
28 count--;
29 } while (count);
30 return (count == 0) ? -ETIMEDOUT : 0;
31}
32
33int ql_hard_reset_mpi_risc(struct ql_adapter *qdev)
34{
35 u32 tmp;
36 int count = UDELAY_COUNT;
37
38 /* Reset the RISC */
39 ql_write32(qdev, CSR, CSR_CMD_SET_RST);
40 do {
41 tmp = ql_read32(qdev, CSR);
42 if (tmp & CSR_RR) {
43 ql_write32(qdev, CSR, CSR_CMD_CLR_RST);
44 break;
45 }
46 mdelay(UDELAY_DELAY);
47 count--;
48 } while (count);
49 return (count == 0) ? -ETIMEDOUT : 0;
50}
51
3int ql_read_mpi_reg(struct ql_adapter *qdev, u32 reg, u32 *data) 52int ql_read_mpi_reg(struct ql_adapter *qdev, u32 reg, u32 *data)
4{ 53{
5 int status; 54 int status;
@@ -45,6 +94,35 @@ int ql_soft_reset_mpi_risc(struct ql_adapter *qdev)
45 return status; 94 return status;
46} 95}
47 96
97/* Determine if we are in charge of the firmware. We are
98 * if we are the lower of the 2 NIC pcie functions, or if
99 * we are the higher function and the lower function
100 * is not enabled.
101 */
102int ql_own_firmware(struct ql_adapter *qdev)
103{
104 u32 temp;
105
106 /* If we are the lower of the 2 NIC functions
107 * on the chip then we are responsible for
108 * core dump and firmware reset after an error.
109 */
110 if (qdev->func < qdev->alt_func)
111 return 1;
112
113 /* If we are the higher of the 2 NIC functions
114 * on the chip and the lower function is not
115 * enabled, then we are responsible for
116 * core dump and firmware reset after an error.
117 */
118 temp = ql_read32(qdev, STS);
119 if (!(temp & (1 << (8 + qdev->alt_func))))
120 return 1;
121
122 return 0;
123
124}
125
48static int ql_get_mb_sts(struct ql_adapter *qdev, struct mbox_params *mbcp) 126static int ql_get_mb_sts(struct ql_adapter *qdev, struct mbox_params *mbcp)
49{ 127{
50 int i, status; 128 int i, status;
@@ -529,6 +607,22 @@ end:
529 return status; 607 return status;
530} 608}
531 609
610int ql_mb_sys_err(struct ql_adapter *qdev)
611{
612 struct mbox_params mbc;
613 struct mbox_params *mbcp = &mbc;
614 int status;
615
616 memset(mbcp, 0, sizeof(struct mbox_params));
617
618 mbcp->in_count = 1;
619 mbcp->out_count = 0;
620
621 mbcp->mbox_in[0] = MB_CMD_MAKE_SYS_ERR;
622
623 status = ql_mailbox_command(qdev, mbcp);
624 return status;
625}
532 626
533/* Get MPI firmware version. This will be used for 627/* Get MPI firmware version. This will be used for
534 * driver banner and for ethtool info. 628 * driver banner and for ethtool info.
@@ -669,6 +763,63 @@ int ql_mb_set_port_cfg(struct ql_adapter *qdev)
669 return status; 763 return status;
670} 764}
671 765
766int ql_mb_dump_ram(struct ql_adapter *qdev, u64 req_dma, u32 addr,
767 u32 size)
768{
769 int status = 0;
770 struct mbox_params mbc;
771 struct mbox_params *mbcp = &mbc;
772
773 memset(mbcp, 0, sizeof(struct mbox_params));
774
775 mbcp->in_count = 9;
776 mbcp->out_count = 1;
777
778 mbcp->mbox_in[0] = MB_CMD_DUMP_RISC_RAM;
779 mbcp->mbox_in[1] = LSW(addr);
780 mbcp->mbox_in[2] = MSW(req_dma);
781 mbcp->mbox_in[3] = LSW(req_dma);
782 mbcp->mbox_in[4] = MSW(size);
783 mbcp->mbox_in[5] = LSW(size);
784 mbcp->mbox_in[6] = MSW(MSD(req_dma));
785 mbcp->mbox_in[7] = LSW(MSD(req_dma));
786 mbcp->mbox_in[8] = MSW(addr);
787
788
789 status = ql_mailbox_command(qdev, mbcp);
790 if (status)
791 return status;
792
793 if (mbcp->mbox_out[0] != MB_CMD_STS_GOOD) {
794 QPRINTK(qdev, DRV, ERR,
795 "Failed to dump risc RAM.\n");
796 status = -EIO;
797 }
798 return status;
799}
800
801/* Issue a mailbox command to dump RISC RAM. */
802int ql_dump_risc_ram_area(struct ql_adapter *qdev, void *buf,
803 u32 ram_addr, int word_count)
804{
805 int status;
806 char *my_buf;
807 dma_addr_t buf_dma;
808
809 my_buf = pci_alloc_consistent(qdev->pdev, word_count * sizeof(u32),
810 &buf_dma);
811 if (!my_buf)
812 return -EIO;
813
814 status = ql_mb_dump_ram(qdev, buf_dma, ram_addr, word_count);
815 if (!status)
816 memcpy(buf, my_buf, word_count * sizeof(u32));
817
818 pci_free_consistent(qdev->pdev, word_count * sizeof(u32), my_buf,
819 buf_dma);
820 return status;
821}
822
672/* Get link settings and maximum frame size settings 823/* Get link settings and maximum frame size settings
673 * for the current port. 824 * for the current port.
674 * Most likely will block. 825 * Most likely will block.
@@ -1143,5 +1294,19 @@ void ql_mpi_reset_work(struct work_struct *work)
1143 cancel_delayed_work_sync(&qdev->mpi_work); 1294 cancel_delayed_work_sync(&qdev->mpi_work);
1144 cancel_delayed_work_sync(&qdev->mpi_port_cfg_work); 1295 cancel_delayed_work_sync(&qdev->mpi_port_cfg_work);
1145 cancel_delayed_work_sync(&qdev->mpi_idc_work); 1296 cancel_delayed_work_sync(&qdev->mpi_idc_work);
1297 /* If we're not the dominant NIC function,
1298 * then there is nothing to do.
1299 */
1300 if (!ql_own_firmware(qdev)) {
1301 QPRINTK(qdev, DRV, ERR, "Don't own firmware!\n");
1302 return;
1303 }
1304
1305 if (!ql_core_dump(qdev, qdev->mpi_coredump)) {
1306 QPRINTK(qdev, DRV, ERR, "Core is dumped!\n");
1307 qdev->core_is_dumped = 1;
1308 queue_delayed_work(qdev->workqueue,
1309 &qdev->mpi_core_to_log, 5 * HZ);
1310 }
1146 ql_soft_reset_mpi_risc(qdev); 1311 ql_soft_reset_mpi_risc(qdev);
1147} 1312}
diff --git a/drivers/net/r6040.c b/drivers/net/r6040.c
index f03e2e4a15a..d68ba7a5863 100644
--- a/drivers/net/r6040.c
+++ b/drivers/net/r6040.c
@@ -1222,7 +1222,7 @@ static void __devexit r6040_remove_one(struct pci_dev *pdev)
1222} 1222}
1223 1223
1224 1224
1225static struct pci_device_id r6040_pci_tbl[] = { 1225static DEFINE_PCI_DEVICE_TABLE(r6040_pci_tbl) = {
1226 { PCI_DEVICE(PCI_VENDOR_ID_RDC, 0x6040) }, 1226 { PCI_DEVICE(PCI_VENDOR_ID_RDC, 0x6040) },
1227 { 0 } 1227 { 0 }
1228}; 1228};
diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c
index 60f96c468a2..c1bb24cf079 100644
--- a/drivers/net/r8169.c
+++ b/drivers/net/r8169.c
@@ -168,7 +168,7 @@ static void rtl_hw_start_8169(struct net_device *);
168static void rtl_hw_start_8168(struct net_device *); 168static void rtl_hw_start_8168(struct net_device *);
169static void rtl_hw_start_8101(struct net_device *); 169static void rtl_hw_start_8101(struct net_device *);
170 170
171static struct pci_device_id rtl8169_pci_tbl[] = { 171static DEFINE_PCI_DEVICE_TABLE(rtl8169_pci_tbl) = {
172 { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8129), 0, 0, RTL_CFG_0 }, 172 { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8129), 0, 0, RTL_CFG_0 },
173 { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8136), 0, 0, RTL_CFG_2 }, 173 { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8136), 0, 0, RTL_CFG_2 },
174 { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8167), 0, 0, RTL_CFG_0 }, 174 { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8167), 0, 0, RTL_CFG_0 },
@@ -3188,15 +3188,10 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
3188 if (netif_msg_probe(tp)) { 3188 if (netif_msg_probe(tp)) {
3189 u32 xid = RTL_R32(TxConfig) & 0x9cf0f8ff; 3189 u32 xid = RTL_R32(TxConfig) & 0x9cf0f8ff;
3190 3190
3191 printk(KERN_INFO "%s: %s at 0x%lx, " 3191 printk(KERN_INFO "%s: %s at 0x%lx, %pM, XID %08x IRQ %d\n",
3192 "%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x, "
3193 "XID %08x IRQ %d\n",
3194 dev->name, 3192 dev->name,
3195 rtl_chip_info[tp->chipset].name, 3193 rtl_chip_info[tp->chipset].name,
3196 dev->base_addr, 3194 dev->base_addr, dev->dev_addr, xid, dev->irq);
3197 dev->dev_addr[0], dev->dev_addr[1],
3198 dev->dev_addr[2], dev->dev_addr[3],
3199 dev->dev_addr[4], dev->dev_addr[5], xid, dev->irq);
3200 } 3195 }
3201 3196
3202 rtl8169_init_phy(dev, tp); 3197 rtl8169_init_phy(dev, tp);
diff --git a/drivers/net/rrunner.c b/drivers/net/rrunner.c
index 1c257098d0a..266baf53496 100644
--- a/drivers/net/rrunner.c
+++ b/drivers/net/rrunner.c
@@ -1688,7 +1688,7 @@ static int rr_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
1688 } 1688 }
1689} 1689}
1690 1690
1691static struct pci_device_id rr_pci_tbl[] = { 1691static DEFINE_PCI_DEVICE_TABLE(rr_pci_tbl) = {
1692 { PCI_VENDOR_ID_ESSENTIAL, PCI_DEVICE_ID_ESSENTIAL_ROADRUNNER, 1692 { PCI_VENDOR_ID_ESSENTIAL, PCI_DEVICE_ID_ESSENTIAL_ROADRUNNER,
1693 PCI_ANY_ID, PCI_ANY_ID, }, 1693 PCI_ANY_ID, PCI_ANY_ID, },
1694 { 0,} 1694 { 0,}
diff --git a/drivers/net/s2io.c b/drivers/net/s2io.c
index 3c4836d0898..d1664586e8f 100644
--- a/drivers/net/s2io.c
+++ b/drivers/net/s2io.c
@@ -523,7 +523,7 @@ module_param_array(rts_frm_len, uint, NULL, 0);
523 * S2IO device table. 523 * S2IO device table.
524 * This table lists all the devices that this driver supports. 524 * This table lists all the devices that this driver supports.
525 */ 525 */
526static struct pci_device_id s2io_tbl[] __devinitdata = { 526static DEFINE_PCI_DEVICE_TABLE(s2io_tbl) = {
527 {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_S2IO_WIN, 527 {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_S2IO_WIN,
528 PCI_ANY_ID, PCI_ANY_ID}, 528 PCI_ANY_ID, PCI_ANY_ID},
529 {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_S2IO_UNI, 529 {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_S2IO_UNI,
diff --git a/drivers/net/sc92031.c b/drivers/net/sc92031.c
index e35050322f9..fd8cb506a2b 100644
--- a/drivers/net/sc92031.c
+++ b/drivers/net/sc92031.c
@@ -1589,7 +1589,7 @@ out:
1589 return 0; 1589 return 0;
1590} 1590}
1591 1591
1592static struct pci_device_id sc92031_pci_device_id_table[] __devinitdata = { 1592static DEFINE_PCI_DEVICE_TABLE(sc92031_pci_device_id_table) = {
1593 { PCI_DEVICE(PCI_VENDOR_ID_SILAN, 0x2031) }, 1593 { PCI_DEVICE(PCI_VENDOR_ID_SILAN, 0x2031) },
1594 { PCI_DEVICE(PCI_VENDOR_ID_SILAN, 0x8139) }, 1594 { PCI_DEVICE(PCI_VENDOR_ID_SILAN, 0x8139) },
1595 { PCI_DEVICE(0x1088, 0x2031) }, 1595 { PCI_DEVICE(0x1088, 0x2031) },
diff --git a/drivers/net/sfc/efx.c b/drivers/net/sfc/efx.c
index 103e8b0e2a0..62d5cd51a9d 100644
--- a/drivers/net/sfc/efx.c
+++ b/drivers/net/sfc/efx.c
@@ -1940,7 +1940,7 @@ void efx_schedule_reset(struct efx_nic *efx, enum reset_type type)
1940 **************************************************************************/ 1940 **************************************************************************/
1941 1941
1942/* PCI device ID table */ 1942/* PCI device ID table */
1943static struct pci_device_id efx_pci_table[] __devinitdata = { 1943static DEFINE_PCI_DEVICE_TABLE(efx_pci_table) = {
1944 {PCI_DEVICE(EFX_VENDID_SFC, FALCON_A_P_DEVID), 1944 {PCI_DEVICE(EFX_VENDID_SFC, FALCON_A_P_DEVID),
1945 .driver_data = (unsigned long) &falcon_a1_nic_type}, 1945 .driver_data = (unsigned long) &falcon_a1_nic_type},
1946 {PCI_DEVICE(EFX_VENDID_SFC, FALCON_B_P_DEVID), 1946 {PCI_DEVICE(EFX_VENDID_SFC, FALCON_B_P_DEVID),
diff --git a/drivers/net/sfc/efx.h b/drivers/net/sfc/efx.h
index a615ac05153..7eff0a615cb 100644
--- a/drivers/net/sfc/efx.h
+++ b/drivers/net/sfc/efx.h
@@ -79,8 +79,6 @@ extern int efx_reset_up(struct efx_nic *efx, enum reset_type method, bool ok);
79 79
80/* Global */ 80/* Global */
81extern void efx_schedule_reset(struct efx_nic *efx, enum reset_type type); 81extern void efx_schedule_reset(struct efx_nic *efx, enum reset_type type);
82extern void efx_suspend(struct efx_nic *efx);
83extern void efx_resume(struct efx_nic *efx);
84extern void efx_init_irq_moderation(struct efx_nic *efx, int tx_usecs, 82extern void efx_init_irq_moderation(struct efx_nic *efx, int tx_usecs,
85 int rx_usecs, bool rx_adaptive); 83 int rx_usecs, bool rx_adaptive);
86extern int efx_request_power(struct efx_nic *efx, int mw, const char *name); 84extern int efx_request_power(struct efx_nic *efx, int mw, const char *name);
diff --git a/drivers/net/sfc/ethtool.c b/drivers/net/sfc/ethtool.c
index 6c0bbed8c47..d9f9c02a928 100644
--- a/drivers/net/sfc/ethtool.c
+++ b/drivers/net/sfc/ethtool.c
@@ -196,7 +196,7 @@ int efx_ethtool_get_settings(struct net_device *net_dev,
196 efx->phy_op->get_settings(efx, ecmd); 196 efx->phy_op->get_settings(efx, ecmd);
197 mutex_unlock(&efx->mac_lock); 197 mutex_unlock(&efx->mac_lock);
198 198
199 /* Falcon GMAC does not support 1000Mbps HD */ 199 /* GMAC does not support 1000Mbps HD */
200 ecmd->supported &= ~SUPPORTED_1000baseT_Half; 200 ecmd->supported &= ~SUPPORTED_1000baseT_Half;
201 /* Both MACs support pause frames (bidirectional and respond-only) */ 201 /* Both MACs support pause frames (bidirectional and respond-only) */
202 ecmd->supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause; 202 ecmd->supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause;
@@ -216,7 +216,7 @@ int efx_ethtool_set_settings(struct net_device *net_dev,
216 struct efx_nic *efx = netdev_priv(net_dev); 216 struct efx_nic *efx = netdev_priv(net_dev);
217 int rc; 217 int rc;
218 218
219 /* Falcon GMAC does not support 1000Mbps HD */ 219 /* GMAC does not support 1000Mbps HD */
220 if (ecmd->speed == SPEED_1000 && ecmd->duplex != DUPLEX_FULL) { 220 if (ecmd->speed == SPEED_1000 && ecmd->duplex != DUPLEX_FULL) {
221 EFX_LOG(efx, "rejecting unsupported 1000Mbps HD" 221 EFX_LOG(efx, "rejecting unsupported 1000Mbps HD"
222 " setting\n"); 222 " setting\n");
@@ -342,8 +342,8 @@ static int efx_ethtool_fill_self_tests(struct efx_nic *efx,
342 unsigned int n = 0, i; 342 unsigned int n = 0, i;
343 enum efx_loopback_mode mode; 343 enum efx_loopback_mode mode;
344 344
345 efx_fill_test(n++, strings, data, &tests->mdio, 345 efx_fill_test(n++, strings, data, &tests->phy_alive,
346 "core", 0, "mdio", NULL); 346 "phy", 0, "alive", NULL);
347 efx_fill_test(n++, strings, data, &tests->nvram, 347 efx_fill_test(n++, strings, data, &tests->nvram,
348 "core", 0, "nvram", NULL); 348 "core", 0, "nvram", NULL);
349 efx_fill_test(n++, strings, data, &tests->interrupt, 349 efx_fill_test(n++, strings, data, &tests->interrupt,
@@ -379,7 +379,7 @@ static int efx_ethtool_fill_self_tests(struct efx_nic *efx,
379 if (name == NULL) 379 if (name == NULL)
380 break; 380 break;
381 381
382 efx_fill_test(n++, strings, data, &tests->phy[i], 382 efx_fill_test(n++, strings, data, &tests->phy_ext[i],
383 "phy", 0, name, NULL); 383 "phy", 0, name, NULL);
384 } 384 }
385 } 385 }
diff --git a/drivers/net/sfc/falcon.c b/drivers/net/sfc/falcon.c
index 9d009c46e96..1b8d83657aa 100644
--- a/drivers/net/sfc/falcon.c
+++ b/drivers/net/sfc/falcon.c
@@ -909,6 +909,8 @@ static int falcon_probe_port(struct efx_nic *efx)
909 efx->wanted_fc = EFX_FC_RX | EFX_FC_TX; 909 efx->wanted_fc = EFX_FC_RX | EFX_FC_TX;
910 else 910 else
911 efx->wanted_fc = EFX_FC_RX; 911 efx->wanted_fc = EFX_FC_RX;
912 if (efx->mdio.mmds & MDIO_DEVS_AN)
913 efx->wanted_fc |= EFX_FC_AUTO;
912 914
913 /* Allocate buffer for stats */ 915 /* Allocate buffer for stats */
914 rc = efx_nic_alloc_buffer(efx, &efx->stats_buffer, 916 rc = efx_nic_alloc_buffer(efx, &efx->stats_buffer,
@@ -1006,7 +1008,7 @@ static int falcon_test_nvram(struct efx_nic *efx)
1006 1008
1007static const struct efx_nic_register_test falcon_b0_register_tests[] = { 1009static const struct efx_nic_register_test falcon_b0_register_tests[] = {
1008 { FR_AZ_ADR_REGION, 1010 { FR_AZ_ADR_REGION,
1009 EFX_OWORD32(0x0001FFFF, 0x0001FFFF, 0x0001FFFF, 0x0001FFFF) }, 1011 EFX_OWORD32(0x0003FFFF, 0x0003FFFF, 0x0003FFFF, 0x0003FFFF) },
1010 { FR_AZ_RX_CFG, 1012 { FR_AZ_RX_CFG,
1011 EFX_OWORD32(0xFFFFFFFE, 0x00017FFF, 0x00000000, 0x00000000) }, 1013 EFX_OWORD32(0xFFFFFFFE, 0x00017FFF, 0x00000000, 0x00000000) },
1012 { FR_AZ_TX_CFG, 1014 { FR_AZ_TX_CFG,
@@ -1728,7 +1730,7 @@ static int falcon_set_wol(struct efx_nic *efx, u32 type)
1728 1730
1729/************************************************************************** 1731/**************************************************************************
1730 * 1732 *
1731 * Revision-dependent attributes used by efx.c 1733 * Revision-dependent attributes used by efx.c and nic.c
1732 * 1734 *
1733 ************************************************************************** 1735 **************************************************************************
1734 */ 1736 */
diff --git a/drivers/net/sfc/mcdi.c b/drivers/net/sfc/mcdi.c
index 9f035b9f035..86610db2cff 100644
--- a/drivers/net/sfc/mcdi.c
+++ b/drivers/net/sfc/mcdi.c
@@ -896,29 +896,73 @@ fail:
896 return rc; 896 return rc;
897} 897}
898 898
899int efx_mcdi_handle_assertion(struct efx_nic *efx) 899static int efx_mcdi_nvram_test(struct efx_nic *efx, unsigned int type)
900{
901 u8 inbuf[MC_CMD_NVRAM_TEST_IN_LEN];
902 u8 outbuf[MC_CMD_NVRAM_TEST_OUT_LEN];
903 int rc;
904
905 MCDI_SET_DWORD(inbuf, NVRAM_TEST_IN_TYPE, type);
906
907 rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_TEST, inbuf, sizeof(inbuf),
908 outbuf, sizeof(outbuf), NULL);
909 if (rc)
910 return rc;
911
912 switch (MCDI_DWORD(outbuf, NVRAM_TEST_OUT_RESULT)) {
913 case MC_CMD_NVRAM_TEST_PASS:
914 case MC_CMD_NVRAM_TEST_NOTSUPP:
915 return 0;
916 default:
917 return -EIO;
918 }
919}
920
921int efx_mcdi_nvram_test_all(struct efx_nic *efx)
922{
923 u32 nvram_types;
924 unsigned int type;
925 int rc;
926
927 rc = efx_mcdi_nvram_types(efx, &nvram_types);
928 if (rc)
929 return rc;
930
931 type = 0;
932 while (nvram_types != 0) {
933 if (nvram_types & 1) {
934 rc = efx_mcdi_nvram_test(efx, type);
935 if (rc)
936 return rc;
937 }
938 type++;
939 nvram_types >>= 1;
940 }
941
942 return 0;
943}
944
945static int efx_mcdi_read_assertion(struct efx_nic *efx)
900{ 946{
901 union { 947 u8 inbuf[MC_CMD_GET_ASSERTS_IN_LEN];
902 u8 asserts[MC_CMD_GET_ASSERTS_IN_LEN]; 948 u8 outbuf[MC_CMD_GET_ASSERTS_OUT_LEN];
903 u8 reboot[MC_CMD_REBOOT_IN_LEN];
904 } inbuf;
905 u8 assertion[MC_CMD_GET_ASSERTS_OUT_LEN];
906 unsigned int flags, index, ofst; 949 unsigned int flags, index, ofst;
907 const char *reason; 950 const char *reason;
908 size_t outlen; 951 size_t outlen;
909 int retry; 952 int retry;
910 int rc; 953 int rc;
911 954
912 /* Check if the MC is in the assertion handler, retrying twice. Once 955 /* Attempt to read any stored assertion state before we reboot
956 * the mcfw out of the assertion handler. Retry twice, once
913 * because a boot-time assertion might cause this command to fail 957 * because a boot-time assertion might cause this command to fail
914 * with EINTR. And once again because GET_ASSERTS can race with 958 * with EINTR. And once again because GET_ASSERTS can race with
915 * MC_CMD_REBOOT running on the other port. */ 959 * MC_CMD_REBOOT running on the other port. */
916 retry = 2; 960 retry = 2;
917 do { 961 do {
918 MCDI_SET_DWORD(inbuf.asserts, GET_ASSERTS_IN_CLEAR, 0); 962 MCDI_SET_DWORD(inbuf, GET_ASSERTS_IN_CLEAR, 1);
919 rc = efx_mcdi_rpc(efx, MC_CMD_GET_ASSERTS, 963 rc = efx_mcdi_rpc(efx, MC_CMD_GET_ASSERTS,
920 inbuf.asserts, MC_CMD_GET_ASSERTS_IN_LEN, 964 inbuf, MC_CMD_GET_ASSERTS_IN_LEN,
921 assertion, sizeof(assertion), &outlen); 965 outbuf, sizeof(outbuf), &outlen);
922 } while ((rc == -EINTR || rc == -EIO) && retry-- > 0); 966 } while ((rc == -EINTR || rc == -EIO) && retry-- > 0);
923 967
924 if (rc) 968 if (rc)
@@ -926,21 +970,11 @@ int efx_mcdi_handle_assertion(struct efx_nic *efx)
926 if (outlen < MC_CMD_GET_ASSERTS_OUT_LEN) 970 if (outlen < MC_CMD_GET_ASSERTS_OUT_LEN)
927 return -EINVAL; 971 return -EINVAL;
928 972
929 flags = MCDI_DWORD(assertion, GET_ASSERTS_OUT_GLOBAL_FLAGS); 973 /* Print out any recorded assertion state */
974 flags = MCDI_DWORD(outbuf, GET_ASSERTS_OUT_GLOBAL_FLAGS);
930 if (flags == MC_CMD_GET_ASSERTS_FLAGS_NO_FAILS) 975 if (flags == MC_CMD_GET_ASSERTS_FLAGS_NO_FAILS)
931 return 0; 976 return 0;
932 977
933 /* Reset the hardware atomically such that only one port with succeed.
934 * This command will succeed if a reboot is no longer required (because
935 * the other port did it first), but fail with EIO if it succeeds.
936 */
937 BUILD_BUG_ON(MC_CMD_REBOOT_OUT_LEN != 0);
938 MCDI_SET_DWORD(inbuf.reboot, REBOOT_IN_FLAGS,
939 MC_CMD_REBOOT_FLAGS_AFTER_ASSERTION);
940 efx_mcdi_rpc(efx, MC_CMD_REBOOT, inbuf.reboot, MC_CMD_REBOOT_IN_LEN,
941 NULL, 0, NULL);
942
943 /* Print out the assertion */
944 reason = (flags == MC_CMD_GET_ASSERTS_FLAGS_SYS_FAIL) 978 reason = (flags == MC_CMD_GET_ASSERTS_FLAGS_SYS_FAIL)
945 ? "system-level assertion" 979 ? "system-level assertion"
946 : (flags == MC_CMD_GET_ASSERTS_FLAGS_THR_FAIL) 980 : (flags == MC_CMD_GET_ASSERTS_FLAGS_THR_FAIL)
@@ -949,20 +983,45 @@ int efx_mcdi_handle_assertion(struct efx_nic *efx)
949 ? "watchdog reset" 983 ? "watchdog reset"
950 : "unknown assertion"; 984 : "unknown assertion";
951 EFX_ERR(efx, "MCPU %s at PC = 0x%.8x in thread 0x%.8x\n", reason, 985 EFX_ERR(efx, "MCPU %s at PC = 0x%.8x in thread 0x%.8x\n", reason,
952 MCDI_DWORD(assertion, GET_ASSERTS_OUT_SAVED_PC_OFFS), 986 MCDI_DWORD(outbuf, GET_ASSERTS_OUT_SAVED_PC_OFFS),
953 MCDI_DWORD(assertion, GET_ASSERTS_OUT_THREAD_OFFS)); 987 MCDI_DWORD(outbuf, GET_ASSERTS_OUT_THREAD_OFFS));
954 988
955 /* Print out the registers */ 989 /* Print out the registers */
956 ofst = MC_CMD_GET_ASSERTS_OUT_GP_REGS_OFFS_OFST; 990 ofst = MC_CMD_GET_ASSERTS_OUT_GP_REGS_OFFS_OFST;
957 for (index = 1; index < 32; index++) { 991 for (index = 1; index < 32; index++) {
958 EFX_ERR(efx, "R%.2d (?): 0x%.8x\n", index, 992 EFX_ERR(efx, "R%.2d (?): 0x%.8x\n", index,
959 MCDI_DWORD2(assertion, ofst)); 993 MCDI_DWORD2(outbuf, ofst));
960 ofst += sizeof(efx_dword_t); 994 ofst += sizeof(efx_dword_t);
961 } 995 }
962 996
963 return 0; 997 return 0;
964} 998}
965 999
1000static void efx_mcdi_exit_assertion(struct efx_nic *efx)
1001{
1002 u8 inbuf[MC_CMD_REBOOT_IN_LEN];
1003
1004 /* Atomically reboot the mcfw out of the assertion handler */
1005 BUILD_BUG_ON(MC_CMD_REBOOT_OUT_LEN != 0);
1006 MCDI_SET_DWORD(inbuf, REBOOT_IN_FLAGS,
1007 MC_CMD_REBOOT_FLAGS_AFTER_ASSERTION);
1008 efx_mcdi_rpc(efx, MC_CMD_REBOOT, inbuf, MC_CMD_REBOOT_IN_LEN,
1009 NULL, 0, NULL);
1010}
1011
1012int efx_mcdi_handle_assertion(struct efx_nic *efx)
1013{
1014 int rc;
1015
1016 rc = efx_mcdi_read_assertion(efx);
1017 if (rc)
1018 return rc;
1019
1020 efx_mcdi_exit_assertion(efx);
1021
1022 return 0;
1023}
1024
966void efx_mcdi_set_id_led(struct efx_nic *efx, enum efx_led_mode mode) 1025void efx_mcdi_set_id_led(struct efx_nic *efx, enum efx_led_mode mode)
967{ 1026{
968 u8 inbuf[MC_CMD_SET_ID_LED_IN_LEN]; 1027 u8 inbuf[MC_CMD_SET_ID_LED_IN_LEN];
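[Editor's note] The type walk in efx_mcdi_nvram_test_all() above is an open-coded scan of a 32-bit partition-type bitmask. Purely as an illustration (not part of the patch), the same traversal can be written with the bitops helper for_each_set_bit() from <linux/bitops.h> (named for_each_bit() in trees contemporary with this series); the sketch below assumes it sits in the same mcdi.c context, since efx_mcdi_nvram_test() is static there:

#include <linux/bitops.h>

/* Sketch only: equivalent walk over the NVRAM partition-type bitmask,
 * widening the mask to unsigned long for the bitops helper. */
static int example_nvram_test_all(struct efx_nic *efx)
{
	unsigned long mask;
	unsigned int type;
	u32 nvram_types;
	int rc;

	rc = efx_mcdi_nvram_types(efx, &nvram_types);
	if (rc)
		return rc;

	mask = nvram_types;
	for_each_set_bit(type, &mask, 32) {
		rc = efx_mcdi_nvram_test(efx, type);
		if (rc)
			return rc;
	}
	return 0;
}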
diff --git a/drivers/net/sfc/mcdi.h b/drivers/net/sfc/mcdi.h
index 10ce98f4c0f..f1f89ad4075 100644
--- a/drivers/net/sfc/mcdi.h
+++ b/drivers/net/sfc/mcdi.h
@@ -116,6 +116,7 @@ extern int efx_mcdi_nvram_erase(struct efx_nic *efx, unsigned int type,
116 loff_t offset, size_t length); 116 loff_t offset, size_t length);
117extern int efx_mcdi_nvram_update_finish(struct efx_nic *efx, 117extern int efx_mcdi_nvram_update_finish(struct efx_nic *efx,
118 unsigned int type); 118 unsigned int type);
119extern int efx_mcdi_nvram_test_all(struct efx_nic *efx);
119extern int efx_mcdi_handle_assertion(struct efx_nic *efx); 120extern int efx_mcdi_handle_assertion(struct efx_nic *efx);
120extern void efx_mcdi_set_id_led(struct efx_nic *efx, enum efx_led_mode mode); 121extern void efx_mcdi_set_id_led(struct efx_nic *efx, enum efx_led_mode mode);
121extern int efx_mcdi_reset_port(struct efx_nic *efx); 122extern int efx_mcdi_reset_port(struct efx_nic *efx);
diff --git a/drivers/net/sfc/mcdi_pcol.h b/drivers/net/sfc/mcdi_pcol.h
index 73e71f42062..bd59302695b 100644
--- a/drivers/net/sfc/mcdi_pcol.h
+++ b/drivers/net/sfc/mcdi_pcol.h
@@ -786,16 +786,18 @@
786#define MC_CMD_GET_PHY_CFG_OUT_FLAGS_OFST 0 786#define MC_CMD_GET_PHY_CFG_OUT_FLAGS_OFST 0
787#define MC_CMD_GET_PHY_CFG_PRESENT_LBN 0 787#define MC_CMD_GET_PHY_CFG_PRESENT_LBN 0
788#define MC_CMD_GET_PHY_CFG_PRESENT_WIDTH 1 788#define MC_CMD_GET_PHY_CFG_PRESENT_WIDTH 1
789#define MC_CMD_GET_PHY_CFG_SHORTBIST_LBN 1 789#define MC_CMD_GET_PHY_CFG_BIST_CABLE_SHORT_LBN 1
790#define MC_CMD_GET_PHY_CFG_SHORTBIST_WIDTH 1 790#define MC_CMD_GET_PHY_CFG_BIST_CABLE_SHORT_WIDTH 1
791#define MC_CMD_GET_PHY_CFG_LONGBIST_LBN 2 791#define MC_CMD_GET_PHY_CFG_BIST_CABLE_LONG_LBN 2
792#define MC_CMD_GET_PHY_CFG_LONGBIST_WIDTH 1 792#define MC_CMD_GET_PHY_CFG_BIST_CABLE_LONG_WIDTH 1
793#define MC_CMD_GET_PHY_CFG_LOWPOWER_LBN 3 793#define MC_CMD_GET_PHY_CFG_LOWPOWER_LBN 3
794#define MC_CMD_GET_PHY_CFG_LOWPOWER_WIDTH 1 794#define MC_CMD_GET_PHY_CFG_LOWPOWER_WIDTH 1
795#define MC_CMD_GET_PHY_CFG_POWEROFF_LBN 4 795#define MC_CMD_GET_PHY_CFG_POWEROFF_LBN 4
796#define MC_CMD_GET_PHY_CFG_POWEROFF_WIDTH 1 796#define MC_CMD_GET_PHY_CFG_POWEROFF_WIDTH 1
797#define MC_CMD_GET_PHY_CFG_TXDIS_LBN 5 797#define MC_CMD_GET_PHY_CFG_TXDIS_LBN 5
798#define MC_CMD_GET_PHY_CFG_TXDIS_WIDTH 1 798#define MC_CMD_GET_PHY_CFG_TXDIS_WIDTH 1
799#define MC_CMD_GET_PHY_CFG_BIST_LBN 6
800#define MC_CMD_GET_PHY_CFG_BIST_WIDTH 1
799#define MC_CMD_GET_PHY_CFG_OUT_TYPE_OFST 4 801#define MC_CMD_GET_PHY_CFG_OUT_TYPE_OFST 4
800/* Bitmask of supported capabilities */ 802/* Bitmask of supported capabilities */
801#define MC_CMD_GET_PHY_CFG_OUT_SUPPORTED_CAP_OFST 8 803#define MC_CMD_GET_PHY_CFG_OUT_SUPPORTED_CAP_OFST 8
@@ -832,7 +834,7 @@
832#define MC_CMD_GET_PHY_CFG_OUT_REVISION_OFST 52 834#define MC_CMD_GET_PHY_CFG_OUT_REVISION_OFST 52
833#define MC_CMD_GET_PHY_CFG_OUT_REVISION_LEN 20 835#define MC_CMD_GET_PHY_CFG_OUT_REVISION_LEN 20
834 836
835/* MC_CMD_START_PHY_BIST: 837/* MC_CMD_START_BIST:
836 * Start a BIST test on the PHY. 838 * Start a BIST test on the PHY.
837 * 839 *
838 * Locks required: PHY_LOCK if doing a PHY BIST 840 * Locks required: PHY_LOCK if doing a PHY BIST
@@ -840,34 +842,71 @@
840 */ 842 */
841#define MC_CMD_START_BIST 0x25 843#define MC_CMD_START_BIST 0x25
842#define MC_CMD_START_BIST_IN_LEN 4 844#define MC_CMD_START_BIST_IN_LEN 4
843#define MC_CMD_START_BIST_TYPE_OFST 0 845#define MC_CMD_START_BIST_IN_TYPE_OFST 0
846#define MC_CMD_START_BIST_OUT_LEN 0
844 847
845/* Run the PHY's short BIST */ 848/* Run the PHY's short cable BIST */
846#define MC_CMD_PHY_BIST_SHORT 1 849#define MC_CMD_PHY_BIST_CABLE_SHORT 1
847/* Run the PHY's long BIST */ 850/* Run the PHY's long cable BIST */
848#define MC_CMD_PHY_BIST_LONG 2 851#define MC_CMD_PHY_BIST_CABLE_LONG 2
849/* Run BIST on the currently selected BPX Serdes (XAUI or XFI) */ 852/* Run BIST on the currently selected BPX Serdes (XAUI or XFI) */
850#define MC_CMD_BPX_SERDES_BIST 3 853#define MC_CMD_BPX_SERDES_BIST 3
854/* Run the MC loopback tests */
855#define MC_CMD_MC_LOOPBACK_BIST 4
856/* Run the PHY's standard BIST */
857#define MC_CMD_PHY_BIST 5
851 858
852/* MC_CMD_POLL_PHY_BIST: (variadic output) 859/* MC_CMD_POLL_PHY_BIST: (variadic output)
853 * Poll for BIST completion 860 * Poll for BIST completion
854 * 861 *
855 * Returns a single status code, and a binary blob of phy-specific 862 * Returns a single status code, and optionally some PHY specific
856 * bist output. If the driver can't succesfully parse the BIST output, 863 * bist output. The driver should only consume the BIST output
857 * it should still respect the Pass/Fail in OUT.RESULT. 864 * after validating OUTLEN and PHY_CFG.PHY_TYPE.
858 * 865 *
859 * Locks required: PHY_LOCK if doing a PHY BIST 866 * If a driver can't successfully parse the BIST output, it should
867 * still respect the Pass/Fail in OUT.RESULT

868 *
869 * Locks required: PHY_LOCK if doing a PHY BIST
860 * Return code: 0, EACCES (if PHY_LOCK is not held) 870 * Return code: 0, EACCES (if PHY_LOCK is not held)
861 */ 871 */
862#define MC_CMD_POLL_BIST 0x26 872#define MC_CMD_POLL_BIST 0x26
863#define MC_CMD_POLL_BIST_IN_LEN 0 873#define MC_CMD_POLL_BIST_IN_LEN 0
864#define MC_CMD_POLL_BIST_OUT_LEN UNKNOWN 874#define MC_CMD_POLL_BIST_OUT_LEN UNKNOWN
875#define MC_CMD_POLL_BIST_OUT_SFT9001_LEN 40
876#define MC_CMD_POLL_BIST_OUT_MRSFP_LEN 8
865#define MC_CMD_POLL_BIST_OUT_RESULT_OFST 0 877#define MC_CMD_POLL_BIST_OUT_RESULT_OFST 0
866#define MC_CMD_POLL_BIST_RUNNING 1 878#define MC_CMD_POLL_BIST_RUNNING 1
867#define MC_CMD_POLL_BIST_PASSED 2 879#define MC_CMD_POLL_BIST_PASSED 2
868#define MC_CMD_POLL_BIST_FAILED 3 880#define MC_CMD_POLL_BIST_FAILED 3
869#define MC_CMD_POLL_BIST_TIMEOUT 4 881#define MC_CMD_POLL_BIST_TIMEOUT 4
882/* Generic: */
870#define MC_CMD_POLL_BIST_OUT_PRIVATE_OFST 4 883#define MC_CMD_POLL_BIST_OUT_PRIVATE_OFST 4
884/* SFT9001-specific: */
885/* (offset 4 unused?) */
886#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_LENGTH_A_OFST 8
887#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_LENGTH_B_OFST 12
888#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_LENGTH_C_OFST 16
889#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_LENGTH_D_OFST 20
890#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_STATUS_A_OFST 24
891#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_STATUS_B_OFST 28
892#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_STATUS_C_OFST 32
893#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_STATUS_D_OFST 36
894#define MC_CMD_POLL_BIST_SFT9001_PAIR_OK 1
895#define MC_CMD_POLL_BIST_SFT9001_PAIR_OPEN 2
896#define MC_CMD_POLL_BIST_SFT9001_INTRA_PAIR_SHORT 3
897#define MC_CMD_POLL_BIST_SFT9001_INTER_PAIR_SHORT 4
898#define MC_CMD_POLL_BIST_SFT9001_PAIR_BUSY 9
899/* mrsfp "PHY" driver: */
900#define MC_CMD_POLL_BIST_OUT_MRSFP_TEST_OFST 4
901#define MC_CMD_POLL_BIST_MRSFP_TEST_COMPLETE 0
902#define MC_CMD_POLL_BIST_MRSFP_TEST_BUS_SWITCH_OFF_I2C_WRITE 1
903#define MC_CMD_POLL_BIST_MRSFP_TEST_BUS_SWITCH_OFF_I2C_NO_ACCESS_IO_EXP 2
904#define MC_CMD_POLL_BIST_MRSFP_TEST_BUS_SWITCH_OFF_I2C_NO_ACCESS_MODULE 3
905#define MC_CMD_POLL_BIST_MRSFP_TEST_IO_EXP_I2C_CONFIGURE 4
906#define MC_CMD_POLL_BIST_MRSFP_TEST_BUS_SWITCH_I2C_NO_CROSSTALK 5
907#define MC_CMD_POLL_BIST_MRSFP_TEST_MODULE_PRESENCE 6
908#define MC_CMD_POLL_BIST_MRSFP_TEST_MODULE_ID_I2C_ACCESS 7
909#define MC_CMD_POLL_BIST_MRSFP_TEST_MODULE_ID_SANE_VALUE 8
871 910
872/* MC_CMD_PHY_SPI: (variadic in, variadic out) 911/* MC_CMD_PHY_SPI: (variadic in, variadic out)
873 * Read/Write/Erase the PHY SPI device 912 * Read/Write/Erase the PHY SPI device
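[Editor's note] The reworked MC_CMD_POLL_BIST layout above now documents per-pair cable results for the SFT9001 and step codes for the mrsfp module test. As a hedged sketch only (the MCDI_DWORD() accessor and field naming follow the convention used elsewhere in this patch, and the helper below is not driver code), the pair-A result could be pulled out of a response buffer like this:

/* Sketch only: outbuf is assumed to hold an MC_CMD_POLL_BIST response of
 * at least MC_CMD_POLL_BIST_OUT_SFT9001_LEN bytes. */
static bool example_sft9001_pair_a_ok(u8 *outbuf)
{
	unsigned int result = MCDI_DWORD(outbuf, POLL_BIST_OUT_RESULT);
	unsigned int status_a =
		MCDI_DWORD(outbuf, POLL_BIST_OUT_SFT9001_CABLE_STATUS_A);

	return result == MC_CMD_POLL_BIST_PASSED &&
	       status_a == MC_CMD_POLL_BIST_SFT9001_PAIR_OK;
}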
@@ -1206,6 +1245,13 @@
1206#define MC_CMD_WOL_FILTER_SET_IN_BITMAP_LAYER4_OFST \ 1245#define MC_CMD_WOL_FILTER_SET_IN_BITMAP_LAYER4_OFST \
1207 (MC_CMD_WOL_FILTER_SET_IN_DATA_OFST + 178) 1246 (MC_CMD_WOL_FILTER_SET_IN_DATA_OFST + 178)
1208 1247
1248#define MC_CMD_WOL_FILTER_SET_IN_LINK_MASK_OFST \
1249 MC_CMD_WOL_FILTER_SET_IN_DATA_OFST
1250#define MC_CMD_WOL_FILTER_SET_IN_LINK_UP_LBN 0
1251#define MC_CMD_WOL_FILTER_SET_IN_LINK_UP_WIDTH 1
1252#define MC_CMD_WOL_FILTER_SET_IN_LINK_DOWN_LBN 1
1253#define MC_CMD_WOL_FILTER_SET_IN_LINK_DOWN_WIDTH 1
1254
1209#define MC_CMD_WOL_FILTER_SET_OUT_LEN 4 1255#define MC_CMD_WOL_FILTER_SET_OUT_LEN 4
1210#define MC_CMD_WOL_FILTER_SET_OUT_FILTER_ID_OFST 0 1256#define MC_CMD_WOL_FILTER_SET_OUT_FILTER_ID_OFST 0
1211 1257
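[Editor's note] The new link-change wake filter reuses the first dword of the common DATA area as a bitmask: bit 0 requests wake on link-up, bit 1 wake on link-down. A minimal sketch of building that dword from the LBN definitions above (plain shift arithmetic, not taken from the driver):

/* Sketch only: wake on link-up, optionally also on link-down. */
u32 link_mask = 1 << MC_CMD_WOL_FILTER_SET_IN_LINK_UP_LBN;	/* bit 0 */
/* link_mask |= 1 << MC_CMD_WOL_FILTER_SET_IN_LINK_DOWN_LBN;	   bit 1 */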
@@ -1216,7 +1262,8 @@
1216#define MC_CMD_WOL_TYPE_IPV4_SYN 0x3 1262#define MC_CMD_WOL_TYPE_IPV4_SYN 0x3
1217#define MC_CMD_WOL_TYPE_IPV6_SYN 0x4 1263#define MC_CMD_WOL_TYPE_IPV6_SYN 0x4
1218#define MC_CMD_WOL_TYPE_BITMAP 0x5 1264#define MC_CMD_WOL_TYPE_BITMAP 0x5
1219#define MC_CMD_WOL_TYPE_MAX 0x6 1265#define MC_CMD_WOL_TYPE_LINK 0x6
1266#define MC_CMD_WOL_TYPE_MAX 0x7
1220 1267
1221#define MC_CMD_FILTER_MODE_SIMPLE 0x0 1268#define MC_CMD_FILTER_MODE_SIMPLE 0x0
1222#define MC_CMD_FILTER_MODE_STRUCTURED 0xffffffff 1269#define MC_CMD_FILTER_MODE_STRUCTURED 0xffffffff
@@ -1357,14 +1404,24 @@
1357 * Returns: 0, EINVAL (bad type/offset/length), EACCES (if PHY_LOCK required and not held) 1404 * Returns: 0, EINVAL (bad type/offset/length), EACCES (if PHY_LOCK required and not held)
1358 */ 1405 */
1359#define MC_CMD_NVRAM_UPDATE_FINISH 0x3c 1406#define MC_CMD_NVRAM_UPDATE_FINISH 0x3c
1360#define MC_CMD_NVRAM_UPDATE_FINISH_IN_LEN 4 1407#define MC_CMD_NVRAM_UPDATE_FINISH_IN_LEN 8
1361#define MC_CMD_NVRAM_UPDATE_FINISH_IN_TYPE_OFST 0 1408#define MC_CMD_NVRAM_UPDATE_FINISH_IN_TYPE_OFST 0
1409#define MC_CMD_NVRAM_UPDATE_FINISH_IN_REBOOT_OFST 4
1362#define MC_CMD_NVRAM_UPDATE_FINISH_OUT_LEN 0 1410#define MC_CMD_NVRAM_UPDATE_FINISH_OUT_LEN 0
1363 1411
1364/* MC_CMD_REBOOT: 1412/* MC_CMD_REBOOT:
1365 * Reboot the MC. The AFTER_ASSERTION flag is intended to be used 1413 * Reboot the MC.
1366 * when the driver notices an assertion failure, to allow two ports to 1414 *
1367 * both recover (semi-)gracefully. 1415 * The AFTER_ASSERTION flag is intended to be used when the driver notices
1416 * an assertion failure (at which point it is expected to perform a complete
1417 * tear down and reinitialise), to allow both ports to reset the MC once
1418 * in an atomic fashion.
1419 *
1420 * Production mc firmwares are generally compiled with REBOOT_ON_ASSERT=1,
1421 * which means that they will automatically reboot out of the assertion
1422 * handler, so this is in practice an optional operation. It is still
1423 * recommended that drivers execute this to support custom firmwares
1424 * with REBOOT_ON_ASSERT=0.
1368 * 1425 *
1369 * Locks required: NONE 1426 * Locks required: NONE
1370 * Returns: Nothing. You get back a response with ERR=1, DATALEN=0 1427 * Returns: Nothing. You get back a response with ERR=1, DATALEN=0
@@ -1469,11 +1526,10 @@
1469 ((_ofst) + 6) 1526 ((_ofst) + 6)
1470 1527
1471/* MC_CMD_READ_SENSORS 1528/* MC_CMD_READ_SENSORS
1472 * Returns the current (value, state) for each sensor 1529 * Returns the current reading from each sensor
1473 * 1530 *
1474 * Returns the current (value, state) [each 16bit] of each sensor supported by 1531 * Returns a sparse array of sensor readings (indexed by the sensor
1475 * this board, by DMA'ing a sparse array (indexed by the sensor type) into host 1532 * type) into host memory. Each array element is a dword.
1476 * memory.
1477 * 1533 *
1478 * The MC will send a SENSOREVT event every time any sensor changes state. The 1534 * The MC will send a SENSOREVT event every time any sensor changes state. The
1479 * driver is responsible for ensuring that it doesn't miss any events. The board 1535 * driver is responsible for ensuring that it doesn't miss any events. The board
@@ -1486,6 +1542,12 @@
1486#define MC_CMD_READ_SENSORS_IN_DMA_ADDR_HI_OFST 4 1542#define MC_CMD_READ_SENSORS_IN_DMA_ADDR_HI_OFST 4
1487#define MC_CMD_READ_SENSORS_OUT_LEN 0 1543#define MC_CMD_READ_SENSORS_OUT_LEN 0
1488 1544
1545/* Sensor reading fields */
1546#define MC_CMD_READ_SENSOR_VALUE_LBN 0
1547#define MC_CMD_READ_SENSOR_VALUE_WIDTH 16
1548#define MC_CMD_READ_SENSOR_STATE_LBN 16
1549#define MC_CMD_READ_SENSOR_STATE_WIDTH 8
1550
1489 1551
1490/* MC_CMD_GET_PHY_STATE: 1552/* MC_CMD_GET_PHY_STATE:
1491 * Report current state of PHY. A "zombie" PHY is a PHY that has failed to 1553 * Report current state of PHY. A "zombie" PHY is a PHY that has failed to
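[Editor's note] The new MC_CMD_READ_SENSOR_* fields describe how each dword of the DMA'd sensor array packs a 16-bit value and an 8-bit state. A hedged sketch of decoding one entry, using plain shift/mask arithmetic derived from the LBN/WIDTH pairs above (the helper name is illustrative, not from the driver):

/* Sketch only: decode one sensor dword from the DMA'd readings array. */
static void example_decode_sensor(u32 reading, u16 *value, u8 *state)
{
	*value = (reading >> MC_CMD_READ_SENSOR_VALUE_LBN) &
		 ((1 << MC_CMD_READ_SENSOR_VALUE_WIDTH) - 1);
	*state = (reading >> MC_CMD_READ_SENSOR_STATE_LBN) &
		 ((1 << MC_CMD_READ_SENSOR_STATE_WIDTH) - 1);
}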
@@ -1577,4 +1639,98 @@
1577#define MC_CMD_MAC_RESET_RESTORE_IN_LEN 0 1639#define MC_CMD_MAC_RESET_RESTORE_IN_LEN 0
1578#define MC_CMD_MAC_RESET_RESTORE_OUT_LEN 0 1640#define MC_CMD_MAC_RESET_RESTORE_OUT_LEN 0
1579 1641
1642
1643/* MC_CMD_TEST_ASSERT:
1644 * Deliberately trigger an assert-detonation in the firmware for testing
1645 * purposes (i.e. to allow testing that the driver copes gracefully).
1646 *
1647 * Locks required: None
1648 * Returns: 0
1649 */
1650
1651#define MC_CMD_TESTASSERT 0x49
1652#define MC_CMD_TESTASSERT_IN_LEN 0
1653#define MC_CMD_TESTASSERT_OUT_LEN 0
1654
1655/* MC_CMD_WORKAROUND 0x4a
1656 *
1657 * Enable/Disable a given workaround. The mcfw will return EINVAL if it
1658 * doesn't understand the given workaround number - which should not
1659 * be treated as a hard error by client code.
1660 *
1661 * This op does not imply any semantics about each workaround, that's between
1662 * the driver and the mcfw on a per-workaround basis.
1663 *
1664 * Locks required: None
1665 * Returns: 0, EINVAL
1666 */
1667#define MC_CMD_WORKAROUND 0x4a
1668#define MC_CMD_WORKAROUND_IN_LEN 8
1669#define MC_CMD_WORKAROUND_IN_TYPE_OFST 0
1670#define MC_CMD_WORKAROUND_BUG17230 1
1671#define MC_CMD_WORKAROUND_IN_ENABLED_OFST 4
1672#define MC_CMD_WORKAROUND_OUT_LEN 0
1673
1674/* MC_CMD_GET_PHY_MEDIA_INFO:
1675 * Read media-specific data from PHY (e.g. SFP/SFP+ module ID information for
1676 * SFP+ PHYs).
1677 *
1678 * The "media type" can be found via GET_PHY_CFG (GET_PHY_CFG_OUT_MEDIA_TYPE);
1679 * the valid "page number" input values, and the output data, are interpreted
1680 * on a per-type basis.
1681 *
1682 * For SFP+: PAGE=0 or 1 returns a 128-byte block read from module I2C address
1683 * 0xA0 offset 0 or 0x80.
1684 * Anything else: currently undefined.
1685 *
1686 * Locks required: None
1687 * Return code: 0
1688 */
1689#define MC_CMD_GET_PHY_MEDIA_INFO 0x4b
1690#define MC_CMD_GET_PHY_MEDIA_INFO_IN_LEN 4
1691#define MC_CMD_GET_PHY_MEDIA_INFO_IN_PAGE_OFST 0
1692#define MC_CMD_GET_PHY_MEDIA_INFO_OUT_LEN(_num_bytes) (4 + (_num_bytes))
1693#define MC_CMD_GET_PHY_MEDIA_INFO_OUT_DATALEN_OFST 0
1694#define MC_CMD_GET_PHY_MEDIA_INFO_OUT_DATA_OFST 4
1695
1696/* MC_CMD_NVRAM_TEST:
1697 * Test a particular NVRAM partition for valid contents (where "valid"
1698 * depends on the type of partition).
1699 *
1700 * Locks required: None
1701 * Return code: 0
1702 */
1703#define MC_CMD_NVRAM_TEST 0x4c
1704#define MC_CMD_NVRAM_TEST_IN_LEN 4
1705#define MC_CMD_NVRAM_TEST_IN_TYPE_OFST 0
1706#define MC_CMD_NVRAM_TEST_OUT_LEN 4
1707#define MC_CMD_NVRAM_TEST_OUT_RESULT_OFST 0
1708#define MC_CMD_NVRAM_TEST_PASS 0
1709#define MC_CMD_NVRAM_TEST_FAIL 1
1710#define MC_CMD_NVRAM_TEST_NOTSUPP 2
1711
1712/* MC_CMD_MRSFP_TWEAK: (debug)
1713 * Read status and/or set parameters for the "mrsfp" driver in mr_rusty builds.
1714 * I2C I/O expander bits are always read; if equaliser parameters are supplied,
1715 * they are configured first.
1716 *
1717 * Locks required: None
1718 * Return code: 0, EINVAL
1719 */
1720#define MC_CMD_MRSFP_TWEAK 0x4d
1721#define MC_CMD_MRSFP_TWEAK_IN_LEN_READ_ONLY 0
1722#define MC_CMD_MRSFP_TWEAK_IN_LEN_EQ_CONFIG 16
1723#define MC_CMD_MRSFP_TWEAK_IN_TXEQ_LEVEL_OFST 0 /* 0-6 low->high de-emph. */
1724#define MC_CMD_MRSFP_TWEAK_IN_TXEQ_DT_CFG_OFST 4 /* 0-8 low->high ref.V */
1725#define MC_CMD_MRSFP_TWEAK_IN_RXEQ_BOOST_OFST 8 /* 0-8 low->high boost */
1726#define MC_CMD_MRSFP_TWEAK_IN_RXEQ_DT_CFG_OFST 12 /* 0-8 low->high ref.V */
1727#define MC_CMD_MRSFP_TWEAK_OUT_LEN 12
1728#define MC_CMD_MRSFP_TWEAK_OUT_IOEXP_INPUTS_OFST 0 /* input bits */
1729#define MC_CMD_MRSFP_TWEAK_OUT_IOEXP_OUTPUTS_OFST 4 /* output bits */
1730#define MC_CMD_MRSFP_TWEAK_OUT_IOEXP_DIRECTION_OFST 8 /* dirs: 0=out, 1=in */
1731
1732/* Do NOT add new commands beyond 0x4f as part of 3.0 : 0x50 - 0x7f will be
1733 * used for post-3.0 extensions. If you run out of space, look for gaps or
1734 * commands that are unused in the existing range. */
1735
1580#endif /* MCDI_PCOL_H */ 1736#endif /* MCDI_PCOL_H */
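[Editor's note] MC_CMD_GET_PHY_MEDIA_INFO above returns a length dword followed by the page data; for SFP+ modules, pages 0 and 1 are documented as 128-byte I2C reads. The following is only a sketch of how a driver might wrap it, assuming the efx_mcdi_rpc()/MCDI_SET_DWORD()/MCDI_DWORD() helpers behave as in the mcdi.c hunks earlier in this patch; the function and the 128-byte constant are illustrative, not code from the series:

#define EXAMPLE_MEDIA_BLOCK_LEN 128	/* per-page block size for SFP+ */

/* Sketch only: read one 128-byte page of SFP+ module ID data. */
static int example_get_media_page(struct efx_nic *efx, unsigned int page,
				  u8 *data)
{
	u8 inbuf[MC_CMD_GET_PHY_MEDIA_INFO_IN_LEN];
	u8 outbuf[MC_CMD_GET_PHY_MEDIA_INFO_OUT_LEN(EXAMPLE_MEDIA_BLOCK_LEN)];
	size_t outlen;
	int rc;

	MCDI_SET_DWORD(inbuf, GET_PHY_MEDIA_INFO_IN_PAGE, page);
	rc = efx_mcdi_rpc(efx, MC_CMD_GET_PHY_MEDIA_INFO,
			  inbuf, sizeof(inbuf), outbuf, sizeof(outbuf),
			  &outlen);
	if (rc)
		return rc;
	if (MCDI_DWORD(outbuf, GET_PHY_MEDIA_INFO_OUT_DATALEN) <
	    EXAMPLE_MEDIA_BLOCK_LEN)
		return -EIO;

	memcpy(data, outbuf + MC_CMD_GET_PHY_MEDIA_INFO_OUT_DATA_OFST,
	       EXAMPLE_MEDIA_BLOCK_LEN);
	return 0;
}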
diff --git a/drivers/net/sfc/mcdi_phy.c b/drivers/net/sfc/mcdi_phy.c
index eb694af7a47..34c22fa986e 100644
--- a/drivers/net/sfc/mcdi_phy.c
+++ b/drivers/net/sfc/mcdi_phy.c
@@ -381,6 +381,18 @@ static int efx_mcdi_phy_probe(struct efx_nic *efx)
381 * but by convention we don't */ 381 * but by convention we don't */
382 efx->loopback_modes &= ~(1 << LOOPBACK_NONE); 382 efx->loopback_modes &= ~(1 << LOOPBACK_NONE);
383 383
384 /* Set the initial link mode */
385 efx_mcdi_phy_decode_link(
386 efx, &efx->link_state,
387 MCDI_DWORD(outbuf, GET_LINK_OUT_LINK_SPEED),
388 MCDI_DWORD(outbuf, GET_LINK_OUT_FLAGS),
389 MCDI_DWORD(outbuf, GET_LINK_OUT_FCNTL));
390
391 /* Default to Autonegotiated flow control if the PHY supports it */
392 efx->wanted_fc = EFX_FC_RX | EFX_FC_TX;
393 if (phy_data->supported_cap & (1 << MC_CMD_PHY_CAP_AN_LBN))
394 efx->wanted_fc |= EFX_FC_AUTO;
395
384 return 0; 396 return 0;
385 397
386fail: 398fail:
@@ -436,7 +448,7 @@ void efx_mcdi_phy_check_fcntl(struct efx_nic *efx, u32 lpa)
436 448
437 /* The link partner capabilities are only relevant if the 449
438 * link supports flow control autonegotiation */ 450 * link supports flow control autonegotiation */
439 if (~phy_cfg->supported_cap & (1 << MC_CMD_PHY_CAP_ASYM_LBN)) 451 if (~phy_cfg->supported_cap & (1 << MC_CMD_PHY_CAP_AN_LBN))
440 return; 452 return;
441 453
442 /* If flow control autoneg is supported and enabled, then fine */ 454 /* If flow control autoneg is supported and enabled, then fine */
@@ -560,6 +572,27 @@ static int efx_mcdi_phy_set_settings(struct efx_nic *efx, struct ethtool_cmd *ec
560 return 0; 572 return 0;
561} 573}
562 574
575static int efx_mcdi_phy_test_alive(struct efx_nic *efx)
576{
577 u8 outbuf[MC_CMD_GET_PHY_STATE_OUT_LEN];
578 size_t outlen;
579 int rc;
580
581 BUILD_BUG_ON(MC_CMD_GET_PHY_STATE_IN_LEN != 0);
582
583 rc = efx_mcdi_rpc(efx, MC_CMD_GET_PHY_STATE, NULL, 0,
584 outbuf, sizeof(outbuf), &outlen);
585 if (rc)
586 return rc;
587
588 if (outlen < MC_CMD_GET_PHY_STATE_OUT_LEN)
589 return -EMSGSIZE;
590 if (MCDI_DWORD(outbuf, GET_PHY_STATE_STATE) != MC_CMD_PHY_STATE_OK)
591 return -EINVAL;
592
593 return 0;
594}
595
563struct efx_phy_operations efx_mcdi_phy_ops = { 596struct efx_phy_operations efx_mcdi_phy_ops = {
564 .probe = efx_mcdi_phy_probe, 597 .probe = efx_mcdi_phy_probe,
565 .init = efx_port_dummy_op_int, 598 .init = efx_port_dummy_op_int,
@@ -569,6 +602,7 @@ struct efx_phy_operations efx_mcdi_phy_ops = {
569 .remove = efx_mcdi_phy_remove, 602 .remove = efx_mcdi_phy_remove,
570 .get_settings = efx_mcdi_phy_get_settings, 603 .get_settings = efx_mcdi_phy_get_settings,
571 .set_settings = efx_mcdi_phy_set_settings, 604 .set_settings = efx_mcdi_phy_set_settings,
605 .test_alive = efx_mcdi_phy_test_alive,
572 .run_tests = NULL, 606 .run_tests = NULL,
573 .test_name = NULL, 607 .test_name = NULL,
574}; 608};
diff --git a/drivers/net/sfc/mdio_10g.c b/drivers/net/sfc/mdio_10g.c
index 1574e52f059..0548fcbbdcd 100644
--- a/drivers/net/sfc/mdio_10g.c
+++ b/drivers/net/sfc/mdio_10g.c
@@ -335,3 +335,27 @@ enum efx_fc_type efx_mdio_get_pause(struct efx_nic *efx)
335 mii_advertise_flowctrl(efx->wanted_fc), 335 mii_advertise_flowctrl(efx->wanted_fc),
336 efx_mdio_read(efx, MDIO_MMD_AN, MDIO_AN_LPA)); 336 efx_mdio_read(efx, MDIO_MMD_AN, MDIO_AN_LPA));
337} 337}
338
339int efx_mdio_test_alive(struct efx_nic *efx)
340{
341 int rc;
342 int devad = __ffs(efx->mdio.mmds);
343 u16 physid1, physid2;
344
345 mutex_lock(&efx->mac_lock);
346
347 physid1 = efx_mdio_read(efx, devad, MDIO_DEVID1);
348 physid2 = efx_mdio_read(efx, devad, MDIO_DEVID2);
349
350 if ((physid1 == 0x0000) || (physid1 == 0xffff) ||
351 (physid2 == 0x0000) || (physid2 == 0xffff)) {
352 EFX_ERR(efx, "no MDIO PHY present with ID %d\n",
353 efx->mdio.prtad);
354 rc = -EINVAL;
355 } else {
356 rc = efx_mdio_check_mmds(efx, efx->mdio.mmds, 0);
357 }
358
359 mutex_unlock(&efx->mac_lock);
360 return rc;
361}
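[Editor's note] efx_mdio_test_alive() above treats an all-zeros or all-ones ID register pair as "no PHY responding". For reference (a generic MDIO detail, not something this patch adds), the two 16-bit ID registers concatenate into the conventional 32-bit PHY identifier, with DEVID1 supplying the high half; the helper name below is illustrative:

/* Sketch only: compose the 32-bit PHY id from the two MMD ID registers. */
static u32 example_phy_id(struct efx_nic *efx, int devad)
{
	u16 physid1 = efx_mdio_read(efx, devad, MDIO_DEVID1);
	u16 physid2 = efx_mdio_read(efx, devad, MDIO_DEVID2);

	return ((u32)physid1 << 16) | physid2;
}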
diff --git a/drivers/net/sfc/mdio_10g.h b/drivers/net/sfc/mdio_10g.h
index f6ac9503339..f89e7192960 100644
--- a/drivers/net/sfc/mdio_10g.h
+++ b/drivers/net/sfc/mdio_10g.h
@@ -106,4 +106,7 @@ efx_mdio_set_flag(struct efx_nic *efx, int devad, int addr,
106 mdio_set_flag(&efx->mdio, efx->mdio.prtad, devad, addr, mask, state); 106 mdio_set_flag(&efx->mdio, efx->mdio.prtad, devad, addr, mask, state);
107} 107}
108 108
109/* Liveness self-test for MDIO PHYs */
110extern int efx_mdio_test_alive(struct efx_nic *efx);
111
109#endif /* EFX_MDIO_10G_H */ 112#endif /* EFX_MDIO_10G_H */
diff --git a/drivers/net/sfc/net_driver.h b/drivers/net/sfc/net_driver.h
index d5aab5b3fa0..cb018e27209 100644
--- a/drivers/net/sfc/net_driver.h
+++ b/drivers/net/sfc/net_driver.h
@@ -18,7 +18,6 @@
18#include <linux/etherdevice.h> 18#include <linux/etherdevice.h>
19#include <linux/ethtool.h> 19#include <linux/ethtool.h>
20#include <linux/if_vlan.h> 20#include <linux/if_vlan.h>
21#include <linux/timer.h>
22#include <linux/mdio.h> 21#include <linux/mdio.h>
23#include <linux/list.h> 22#include <linux/list.h>
24#include <linux/pci.h> 23#include <linux/pci.h>
@@ -101,9 +100,6 @@ do {if (net_ratelimit()) EFX_LOG(efx, fmt, ##args); } while (0)
101 * Special buffers are used for the event queues and the TX and RX 100 * Special buffers are used for the event queues and the TX and RX
102 * descriptor queues for each channel. They are *not* used for the 101 * descriptor queues for each channel. They are *not* used for the
103 * actual transmit and receive buffers. 102 * actual transmit and receive buffers.
104 *
105 * Note that for Falcon, TX and RX descriptor queues live in host memory.
106 * Allocation and freeing procedures must take this into account.
107 */ 103 */
108struct efx_special_buffer { 104struct efx_special_buffer {
109 void *addr; 105 void *addr;
@@ -300,7 +296,7 @@ struct efx_rx_queue {
300 * @dma_addr: DMA base address of the buffer 296 * @dma_addr: DMA base address of the buffer
301 * @len: Buffer length, in bytes 297 * @len: Buffer length, in bytes
302 * 298 *
303 * Falcon uses these buffers for its interrupt status registers and 299 * The NIC uses these buffers for its interrupt status registers and
304 * MAC stats dumps. 300 * MAC stats dumps.
305 */ 301 */
306struct efx_buffer { 302struct efx_buffer {
@@ -516,8 +512,9 @@ struct efx_mac_operations {
516 * @set_settings: Set ethtool settings. Serialised by the mac_lock. 512 * @set_settings: Set ethtool settings. Serialised by the mac_lock.
517 * @set_npage_adv: Set abilities advertised in (Extended) Next Page 513 * @set_npage_adv: Set abilities advertised in (Extended) Next Page
518 * (only needed where AN bit is set in mmds) 514 * (only needed where AN bit is set in mmds)
515 * @test_alive: Test that PHY is 'alive' (online)
519 * @test_name: Get the name of a PHY-specific test/result 516 * @test_name: Get the name of a PHY-specific test/result
520 * @run_tests: Run tests and record results as appropriate. 517 * @run_tests: Run tests and record results as appropriate (offline).
521 * Flags are the ethtool tests flags. 518 * Flags are the ethtool tests flags.
522 */ 519 */
523struct efx_phy_operations { 520struct efx_phy_operations {
@@ -532,6 +529,7 @@ struct efx_phy_operations {
532 int (*set_settings) (struct efx_nic *efx, 529 int (*set_settings) (struct efx_nic *efx,
533 struct ethtool_cmd *ecmd); 530 struct ethtool_cmd *ecmd);
534 void (*set_npage_adv) (struct efx_nic *efx, u32); 531 void (*set_npage_adv) (struct efx_nic *efx, u32);
532 int (*test_alive) (struct efx_nic *efx);
535 const char *(*test_name) (struct efx_nic *efx, unsigned int index); 533 const char *(*test_name) (struct efx_nic *efx, unsigned int index);
536 int (*run_tests) (struct efx_nic *efx, int *results, unsigned flags); 534 int (*run_tests) (struct efx_nic *efx, int *results, unsigned flags);
537}; 535};
@@ -672,7 +670,7 @@ union efx_multicast_hash {
672 * @irq_status: Interrupt status buffer 670 * @irq_status: Interrupt status buffer
673 * @last_irq_cpu: Last CPU to handle interrupt. 671 * @last_irq_cpu: Last CPU to handle interrupt.
674 * This register is written with the SMP processor ID whenever an 672 * This register is written with the SMP processor ID whenever an
675 * interrupt is handled. It is used by falcon_test_interrupt() 673 * interrupt is handled. It is used by efx_nic_test_interrupt()
676 * to verify that an interrupt has occurred. 674 * to verify that an interrupt has occurred.
677 * @spi_flash: SPI flash device 675 * @spi_flash: SPI flash device
678 * This field will be %NULL if no flash device is present (or for Siena). 676 * This field will be %NULL if no flash device is present (or for Siena).
@@ -721,8 +719,7 @@ union efx_multicast_hash {
721 * @loopback_modes: Supported loopback mode bitmask 719 * @loopback_modes: Supported loopback mode bitmask
722 * @loopback_selftest: Offline self-test private state 720 * @loopback_selftest: Offline self-test private state
723 * 721 *
724 * The @priv field of the corresponding &struct net_device points to 722 * This is stored in the private area of the &struct net_device.
725 * this.
726 */ 723 */
727struct efx_nic { 724struct efx_nic {
728 char name[IFNAMSIZ]; 725 char name[IFNAMSIZ];
@@ -995,7 +992,7 @@ static inline void clear_bit_le(unsigned nr, unsigned char *addr)
995 * that the net driver will program into the MAC as the maximum frame 992 * that the net driver will program into the MAC as the maximum frame
996 * length. 993 * length.
997 * 994 *
998 * The 10G MAC used in Falcon requires 8-byte alignment on the frame 995 * The 10G MAC requires 8-byte alignment on the frame
999 * length, so we round up to the nearest 8. 996 * length, so we round up to the nearest 8.
1000 * 997 *
1001 * Re-clocking by the XGXS on RX can reduce an IPG to 32 bits (half an 998 * Re-clocking by the XGXS on RX can reduce an IPG to 32 bits (half an
diff --git a/drivers/net/sfc/nic.c b/drivers/net/sfc/nic.c
index db44224ed2c..b06f8e34830 100644
--- a/drivers/net/sfc/nic.c
+++ b/drivers/net/sfc/nic.c
@@ -623,10 +623,6 @@ void efx_nic_remove_rx(struct efx_rx_queue *rx_queue)
623 * 623 *
624 * This writes the EVQ_RPTR_REG register for the specified channel's 624 * This writes the EVQ_RPTR_REG register for the specified channel's
625 * event queue. 625 * event queue.
626 *
627 * Note that EVQ_RPTR_REG contains the index of the "last read" event,
628 * whereas channel->eventq_read_ptr contains the index of the "next to
629 * read" event.
630 */ 626 */
631void efx_nic_eventq_read_ack(struct efx_channel *channel) 627void efx_nic_eventq_read_ack(struct efx_channel *channel)
632{ 628{
@@ -1384,6 +1380,15 @@ static irqreturn_t efx_legacy_interrupt(int irq, void *dev_id)
1384 efx->last_irq_cpu = raw_smp_processor_id(); 1380 efx->last_irq_cpu = raw_smp_processor_id();
1385 EFX_TRACE(efx, "IRQ %d on CPU %d status " EFX_DWORD_FMT "\n", 1381 EFX_TRACE(efx, "IRQ %d on CPU %d status " EFX_DWORD_FMT "\n",
1386 irq, raw_smp_processor_id(), EFX_DWORD_VAL(reg)); 1382 irq, raw_smp_processor_id(), EFX_DWORD_VAL(reg));
1383 } else if (EFX_WORKAROUND_15783(efx)) {
1384 /* We can't return IRQ_HANDLED more than once on seeing ISR0=0
1385 * because this might be a shared interrupt, but we do need to
1386 * check the channel every time and preemptively rearm it if
1387 * it's idle. */
1388 efx_for_each_channel(channel, efx) {
1389 if (!channel->work_pending)
1390 efx_nic_eventq_read_ack(channel);
1391 }
1387 } 1392 }
1388 1393
1389 return result; 1394 return result;
diff --git a/drivers/net/sfc/qt202x_phy.c b/drivers/net/sfc/qt202x_phy.c
index e0d13a45101..14793d8bd66 100644
--- a/drivers/net/sfc/qt202x_phy.c
+++ b/drivers/net/sfc/qt202x_phy.c
@@ -445,4 +445,5 @@ struct efx_phy_operations falcon_qt202x_phy_ops = {
445 .remove = qt202x_phy_remove, 445 .remove = qt202x_phy_remove,
446 .get_settings = qt202x_phy_get_settings, 446 .get_settings = qt202x_phy_get_settings,
447 .set_settings = efx_mdio_set_settings, 447 .set_settings = efx_mdio_set_settings,
448 .test_alive = efx_mdio_test_alive,
448}; 449};
diff --git a/drivers/net/sfc/selftest.c b/drivers/net/sfc/selftest.c
index 250c8827b84..cf0139a7d9a 100644
--- a/drivers/net/sfc/selftest.c
+++ b/drivers/net/sfc/selftest.c
@@ -24,9 +24,6 @@
24#include "nic.h" 24#include "nic.h"
25#include "selftest.h" 25#include "selftest.h"
26#include "workarounds.h" 26#include "workarounds.h"
27#include "spi.h"
28#include "io.h"
29#include "mdio_10g.h"
30 27
31/* 28/*
32 * Loopback test packet structure 29 * Loopback test packet structure
@@ -76,42 +73,15 @@ struct efx_loopback_state {
76 * 73 *
77 **************************************************************************/ 74 **************************************************************************/
78 75
79static int efx_test_mdio(struct efx_nic *efx, struct efx_self_tests *tests) 76static int efx_test_phy_alive(struct efx_nic *efx, struct efx_self_tests *tests)
80{ 77{
81 int rc = 0; 78 int rc = 0;
82 int devad;
83 u16 physid1, physid2;
84
85 if (efx->mdio.mode_support & MDIO_SUPPORTS_C45)
86 devad = __ffs(efx->mdio.mmds);
87 else if (efx->mdio.mode_support & MDIO_SUPPORTS_C22)
88 devad = MDIO_DEVAD_NONE;
89 else
90 return 0;
91
92 mutex_lock(&efx->mac_lock);
93 tests->mdio = -1;
94
95 physid1 = efx_mdio_read(efx, devad, MDIO_DEVID1);
96 physid2 = efx_mdio_read(efx, devad, MDIO_DEVID2);
97 79
98 if ((physid1 == 0x0000) || (physid1 == 0xffff) || 80 if (efx->phy_op->test_alive) {
99 (physid2 == 0x0000) || (physid2 == 0xffff)) { 81 rc = efx->phy_op->test_alive(efx);
100 EFX_ERR(efx, "no MDIO PHY present with ID %d\n", 82 tests->phy_alive = rc ? -1 : 1;
101 efx->mdio.prtad);
102 rc = -EINVAL;
103 goto out;
104 } 83 }
105 84
106 if (EFX_IS10G(efx)) {
107 rc = efx_mdio_check_mmds(efx, efx->mdio.mmds, 0);
108 if (rc)
109 goto out;
110 }
111
112out:
113 mutex_unlock(&efx->mac_lock);
114 tests->mdio = rc ? -1 : 1;
115 return rc; 85 return rc;
116} 86}
117 87
@@ -258,7 +228,7 @@ static int efx_test_phy(struct efx_nic *efx, struct efx_self_tests *tests,
258 return 0; 228 return 0;
259 229
260 mutex_lock(&efx->mac_lock); 230 mutex_lock(&efx->mac_lock);
261 rc = efx->phy_op->run_tests(efx, tests->phy, flags); 231 rc = efx->phy_op->run_tests(efx, tests->phy_ext, flags);
262 mutex_unlock(&efx->mac_lock); 232 mutex_unlock(&efx->mac_lock);
263 return rc; 233 return rc;
264} 234}
@@ -684,7 +654,7 @@ int efx_selftest(struct efx_nic *efx, struct efx_self_tests *tests,
684 /* Online (i.e. non-disruptive) testing 654 /* Online (i.e. non-disruptive) testing
685 * This checks interrupt generation, event delivery and PHY presence. */ 655 * This checks interrupt generation, event delivery and PHY presence. */
686 656
687 rc = efx_test_mdio(efx, tests); 657 rc = efx_test_phy_alive(efx, tests);
688 if (rc && !rc_test) 658 if (rc && !rc_test)
689 rc_test = rc; 659 rc_test = rc;
690 660
diff --git a/drivers/net/sfc/selftest.h b/drivers/net/sfc/selftest.h
index f6feee04c96..643bef72b99 100644
--- a/drivers/net/sfc/selftest.h
+++ b/drivers/net/sfc/selftest.h
@@ -32,7 +32,7 @@ struct efx_loopback_self_tests {
32 */ 32 */
33struct efx_self_tests { 33struct efx_self_tests {
34 /* online tests */ 34 /* online tests */
35 int mdio; 35 int phy_alive;
36 int nvram; 36 int nvram;
37 int interrupt; 37 int interrupt;
38 int eventq_dma[EFX_MAX_CHANNELS]; 38 int eventq_dma[EFX_MAX_CHANNELS];
@@ -40,7 +40,7 @@ struct efx_self_tests {
40 int eventq_poll[EFX_MAX_CHANNELS]; 40 int eventq_poll[EFX_MAX_CHANNELS];
41 /* offline tests */ 41 /* offline tests */
42 int registers; 42 int registers;
43 int phy[EFX_MAX_PHY_TESTS]; 43 int phy_ext[EFX_MAX_PHY_TESTS];
44 struct efx_loopback_self_tests loopback[LOOPBACK_TEST_MAX + 1]; 44 struct efx_loopback_self_tests loopback[LOOPBACK_TEST_MAX + 1];
45}; 45};
46 46
diff --git a/drivers/net/sfc/siena.c b/drivers/net/sfc/siena.c
index f8c6771e66d..1619fb5a64f 100644
--- a/drivers/net/sfc/siena.c
+++ b/drivers/net/sfc/siena.c
@@ -106,16 +106,11 @@ static int siena_probe_port(struct efx_nic *efx)
106 efx->mdio.mdio_read = siena_mdio_read; 106 efx->mdio.mdio_read = siena_mdio_read;
107 efx->mdio.mdio_write = siena_mdio_write; 107 efx->mdio.mdio_write = siena_mdio_write;
108 108
109 /* Fill out MDIO structure and loopback modes */ 109 /* Fill out MDIO structure, loopback modes, and initial link state */
110 rc = efx->phy_op->probe(efx); 110 rc = efx->phy_op->probe(efx);
111 if (rc != 0) 111 if (rc != 0)
112 return rc; 112 return rc;
113 113
114 /* Initial assumption */
115 efx->link_state.speed = 10000;
116 efx->link_state.fd = true;
117 efx->wanted_fc = EFX_FC_RX | EFX_FC_TX;
118
119 /* Allocate buffer for stats */ 114 /* Allocate buffer for stats */
120 rc = efx_nic_alloc_buffer(efx, &efx->stats_buffer, 115 rc = efx_nic_alloc_buffer(efx, &efx->stats_buffer,
121 MC_CMD_MAC_NSTATS * sizeof(u64)); 116 MC_CMD_MAC_NSTATS * sizeof(u64));
@@ -139,7 +134,7 @@ void siena_remove_port(struct efx_nic *efx)
139 134
140static const struct efx_nic_register_test siena_register_tests[] = { 135static const struct efx_nic_register_test siena_register_tests[] = {
141 { FR_AZ_ADR_REGION, 136 { FR_AZ_ADR_REGION,
142 EFX_OWORD32(0x0001FFFF, 0x0001FFFF, 0x0001FFFF, 0x0001FFFF) }, 137 EFX_OWORD32(0x0003FFFF, 0x0003FFFF, 0x0003FFFF, 0x0003FFFF) },
143 { FR_CZ_USR_EV_CFG, 138 { FR_CZ_USR_EV_CFG,
144 EFX_OWORD32(0x000103FF, 0x00000000, 0x00000000, 0x00000000) }, 139 EFX_OWORD32(0x000103FF, 0x00000000, 0x00000000, 0x00000000) },
145 { FR_AZ_RX_CFG, 140 { FR_AZ_RX_CFG,
@@ -181,6 +176,12 @@ static int siena_test_registers(struct efx_nic *efx)
181 176
182static int siena_reset_hw(struct efx_nic *efx, enum reset_type method) 177static int siena_reset_hw(struct efx_nic *efx, enum reset_type method)
183{ 178{
179 int rc;
180
181 /* Recover from a failed assertion pre-reset */
182 rc = efx_mcdi_handle_assertion(efx);
183 if (rc)
184 return rc;
184 185
185 if (method == RESET_TYPE_WORLD) 186 if (method == RESET_TYPE_WORLD)
186 return efx_mcdi_reset_mc(efx); 187 return efx_mcdi_reset_mc(efx);
@@ -582,6 +583,7 @@ struct efx_nic_type siena_a0_nic_type = {
582 .set_wol = siena_set_wol, 583 .set_wol = siena_set_wol,
583 .resume_wol = siena_init_wol, 584 .resume_wol = siena_init_wol,
584 .test_registers = siena_test_registers, 585 .test_registers = siena_test_registers,
586 .test_nvram = efx_mcdi_nvram_test_all,
585 .default_mac_ops = &efx_mcdi_mac_operations, 587 .default_mac_ops = &efx_mcdi_mac_operations,
586 588
587 .revision = EFX_REV_SIENA_A0, 589 .revision = EFX_REV_SIENA_A0,
diff --git a/drivers/net/sfc/tenxpress.c b/drivers/net/sfc/tenxpress.c
index 3009c297c13..10db071bd83 100644
--- a/drivers/net/sfc/tenxpress.c
+++ b/drivers/net/sfc/tenxpress.c
@@ -842,6 +842,7 @@ struct efx_phy_operations falcon_sfx7101_phy_ops = {
842 .get_settings = tenxpress_get_settings, 842 .get_settings = tenxpress_get_settings,
843 .set_settings = tenxpress_set_settings, 843 .set_settings = tenxpress_set_settings,
844 .set_npage_adv = sfx7101_set_npage_adv, 844 .set_npage_adv = sfx7101_set_npage_adv,
845 .test_alive = efx_mdio_test_alive,
845 .test_name = sfx7101_test_name, 846 .test_name = sfx7101_test_name,
846 .run_tests = sfx7101_run_tests, 847 .run_tests = sfx7101_run_tests,
847}; 848};
@@ -856,6 +857,7 @@ struct efx_phy_operations falcon_sft9001_phy_ops = {
856 .get_settings = tenxpress_get_settings, 857 .get_settings = tenxpress_get_settings,
857 .set_settings = tenxpress_set_settings, 858 .set_settings = tenxpress_set_settings,
858 .set_npage_adv = sft9001_set_npage_adv, 859 .set_npage_adv = sft9001_set_npage_adv,
860 .test_alive = efx_mdio_test_alive,
859 .test_name = sft9001_test_name, 861 .test_name = sft9001_test_name,
860 .run_tests = sft9001_run_tests, 862 .run_tests = sft9001_run_tests,
861}; 863};
diff --git a/drivers/net/sh_eth.c b/drivers/net/sh_eth.c
index 7402b858cab..42a35f086a9 100644
--- a/drivers/net/sh_eth.c
+++ b/drivers/net/sh_eth.c
@@ -1473,13 +1473,9 @@ static int sh_eth_drv_probe(struct platform_device *pdev)
1473 if (ret) 1473 if (ret)
1474 goto out_unregister; 1474 goto out_unregister;
1475 1475
1476 /* pritnt device infomation */ 1476 /* print device information */
1477 pr_info("Base address at 0x%x, ", 1477 pr_info("Base address at 0x%x, %pM, IRQ %d.\n",
1478 (u32)ndev->base_addr); 1478 (u32)ndev->base_addr, ndev->dev_addr, ndev->irq);
1479
1480 for (i = 0; i < 5; i++)
1481 printk("%02X:", ndev->dev_addr[i]);
1482 printk("%02X, IRQ %d.\n", ndev->dev_addr[i], ndev->irq);
1483 1479
1484 platform_set_drvdata(pdev, ndev); 1480 platform_set_drvdata(pdev, ndev);
1485 1481
diff --git a/drivers/net/sis190.c b/drivers/net/sis190.c
index 31233b4c44a..626de766443 100644
--- a/drivers/net/sis190.c
+++ b/drivers/net/sis190.c
@@ -334,7 +334,7 @@ static const struct {
334 { "SiS 191 PCI Gigabit Ethernet adapter" }, 334 { "SiS 191 PCI Gigabit Ethernet adapter" },
335}; 335};
336 336
337static struct pci_device_id sis190_pci_tbl[] = { 337static DEFINE_PCI_DEVICE_TABLE(sis190_pci_tbl) = {
338 { PCI_DEVICE(PCI_VENDOR_ID_SI, 0x0190), 0, 0, 0 }, 338 { PCI_DEVICE(PCI_VENDOR_ID_SI, 0x0190), 0, 0, 0 },
339 { PCI_DEVICE(PCI_VENDOR_ID_SI, 0x0191), 0, 0, 1 }, 339 { PCI_DEVICE(PCI_VENDOR_ID_SI, 0x0191), 0, 0, 1 },
340 { 0, }, 340 { 0, },
diff --git a/drivers/net/sis900.c b/drivers/net/sis900.c
index 7360d4bbf75..20c5ce47489 100644
--- a/drivers/net/sis900.c
+++ b/drivers/net/sis900.c
@@ -106,7 +106,7 @@ static const char * card_names[] = {
106 "SiS 900 PCI Fast Ethernet", 106 "SiS 900 PCI Fast Ethernet",
107 "SiS 7016 PCI Fast Ethernet" 107 "SiS 7016 PCI Fast Ethernet"
108}; 108};
109static struct pci_device_id sis900_pci_tbl [] = { 109static DEFINE_PCI_DEVICE_TABLE(sis900_pci_tbl) = {
110 {PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_900, 110 {PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_900,
111 PCI_ANY_ID, PCI_ANY_ID, 0, 0, SIS_900}, 111 PCI_ANY_ID, PCI_ANY_ID, 0, 0, SIS_900},
112 {PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_7016, 112 {PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_7016,
diff --git a/drivers/net/skfp/skfddi.c b/drivers/net/skfp/skfddi.c
index db216a72850..6b955a4f19b 100644
--- a/drivers/net/skfp/skfddi.c
+++ b/drivers/net/skfp/skfddi.c
@@ -149,7 +149,7 @@ extern void mac_drv_rx_mode(struct s_smc *smc, int mode);
149extern void mac_drv_clear_rx_queue(struct s_smc *smc); 149extern void mac_drv_clear_rx_queue(struct s_smc *smc);
150extern void enable_tx_irq(struct s_smc *smc, u_short queue); 150extern void enable_tx_irq(struct s_smc *smc, u_short queue);
151 151
152static struct pci_device_id skfddi_pci_tbl[] = { 152static DEFINE_PCI_DEVICE_TABLE(skfddi_pci_tbl) = {
153 { PCI_VENDOR_ID_SK, PCI_DEVICE_ID_SK_FP, PCI_ANY_ID, PCI_ANY_ID, }, 153 { PCI_VENDOR_ID_SK, PCI_DEVICE_ID_SK_FP, PCI_ANY_ID, PCI_ANY_ID, },
154 { } /* Terminating entry */ 154 { } /* Terminating entry */
155}; 155};
@@ -435,13 +435,7 @@ static int skfp_driver_init(struct net_device *dev)
435 goto fail; 435 goto fail;
436 } 436 }
437 read_address(smc, NULL); 437 read_address(smc, NULL);
438 pr_debug(KERN_INFO "HW-Addr: %02x %02x %02x %02x %02x %02x\n", 438 pr_debug(KERN_INFO "HW-Addr: %pMF\n", smc->hw.fddi_canon_addr.a);
439 smc->hw.fddi_canon_addr.a[0],
440 smc->hw.fddi_canon_addr.a[1],
441 smc->hw.fddi_canon_addr.a[2],
442 smc->hw.fddi_canon_addr.a[3],
443 smc->hw.fddi_canon_addr.a[4],
444 smc->hw.fddi_canon_addr.a[5]);
445 memcpy(dev->dev_addr, smc->hw.fddi_canon_addr.a, 6); 439 memcpy(dev->dev_addr, smc->hw.fddi_canon_addr.a, 6);
446 440
447 smt_reset_defaults(smc, 0); 441 smt_reset_defaults(smc, 0);
@@ -890,15 +884,8 @@ static void skfp_ctl_set_multicast_list_wo_lock(struct net_device *dev)
890 (struct fddi_addr *)dmi->dmi_addr, 884 (struct fddi_addr *)dmi->dmi_addr,
891 1); 885 1);
892 886
893 pr_debug(KERN_INFO "ENABLE MC ADDRESS:"); 887 pr_debug(KERN_INFO "ENABLE MC ADDRESS: %pMF\n",
894 pr_debug(" %02x %02x %02x ", 888 dmi->dmi_addr);
895 dmi->dmi_addr[0],
896 dmi->dmi_addr[1],
897 dmi->dmi_addr[2]);
898 pr_debug("%02x %02x %02x\n",
899 dmi->dmi_addr[3],
900 dmi->dmi_addr[4],
901 dmi->dmi_addr[5]);
902 dmi = dmi->next; 889 dmi = dmi->next;
903 } // for 890 } // for
904 891
diff --git a/drivers/net/skge.c b/drivers/net/skge.c
index 379a3dc0016..5ff46eb18d0 100644
--- a/drivers/net/skge.c
+++ b/drivers/net/skge.c
@@ -78,7 +78,7 @@ static int debug = -1; /* defaults above */
78module_param(debug, int, 0); 78module_param(debug, int, 0);
79MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)"); 79MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
80 80
81static const struct pci_device_id skge_id_table[] = { 81static DEFINE_PCI_DEVICE_TABLE(skge_id_table) = {
82 { PCI_DEVICE(PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3C940) }, 82 { PCI_DEVICE(PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3C940) },
83 { PCI_DEVICE(PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3C940B) }, 83 { PCI_DEVICE(PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3C940B) },
84 { PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_GE) }, 84 { PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_GE) },
diff --git a/drivers/net/sky2.c b/drivers/net/sky2.c
index 67249c3c9f5..a6ddfc1a9cb 100644
--- a/drivers/net/sky2.c
+++ b/drivers/net/sky2.c
@@ -3188,7 +3188,9 @@ static void sky2_reset(struct sky2_hw *hw)
3188static void sky2_detach(struct net_device *dev) 3188static void sky2_detach(struct net_device *dev)
3189{ 3189{
3190 if (netif_running(dev)) { 3190 if (netif_running(dev)) {
3191 netif_tx_lock(dev);
3191 netif_device_detach(dev); /* stop txq */ 3192 netif_device_detach(dev); /* stop txq */
3193 netif_tx_unlock(dev);
3192 sky2_down(dev); 3194 sky2_down(dev);
3193 } 3195 }
3194} 3196}
@@ -3864,6 +3866,50 @@ static int sky2_get_regs_len(struct net_device *dev)
3864 return 0x4000; 3866 return 0x4000;
3865} 3867}
3866 3868
3869static int sky2_reg_access_ok(struct sky2_hw *hw, unsigned int b)
3870{
3871 /* This complicated switch statement is to make sure we
3872 * only access regions that are unreserved.
3873 * Some blocks are only valid on dual port cards.
3874 */
3875 switch (b) {
3876 /* second port */
3877 case 5: /* Tx Arbiter 2 */
3878 case 9: /* RX2 */
3879 case 14 ... 15: /* TX2 */
3880 case 17: case 19: /* Ram Buffer 2 */
3881 case 22 ... 23: /* Tx Ram Buffer 2 */
3882 case 25: /* Rx MAC Fifo 1 */
3883 case 27: /* Tx MAC Fifo 2 */
3884 case 31: /* GPHY 2 */
3885 case 40 ... 47: /* Pattern Ram 2 */
3886 case 52: case 54: /* TCP Segmentation 2 */
3887 case 112 ... 116: /* GMAC 2 */
3888 return hw->ports > 1;
3889
3890 case 0: /* Control */
3891 case 2: /* Mac address */
3892 case 4: /* Tx Arbiter 1 */
3893 case 7: /* PCI express reg */
3894 case 8: /* RX1 */
3895 case 12 ... 13: /* TX1 */
3896 case 16: case 18:/* Rx Ram Buffer 1 */
3897 case 20 ... 21: /* Tx Ram Buffer 1 */
3898 case 24: /* Rx MAC Fifo 1 */
3899 case 26: /* Tx MAC Fifo 1 */
3900 case 28 ... 29: /* Descriptor and status unit */
3901 case 30: /* GPHY 1*/
3902 case 32 ... 39: /* Pattern Ram 1 */
3903 case 48: case 50: /* TCP Segmentation 1 */
3904 case 56 ... 60: /* PCI space */
3905 case 80 ... 84: /* GMAC 1 */
3906 return 1;
3907
3908 default:
3909 return 0;
3910 }
3911}
3912
3867/* 3913/*
3868 * Returns copy of control register region 3914 * Returns copy of control register region
3869 * Note: ethtool_get_regs always provides full size (16k) buffer 3915 * Note: ethtool_get_regs always provides full size (16k) buffer
@@ -3878,55 +3924,13 @@ static void sky2_get_regs(struct net_device *dev, struct ethtool_regs *regs,
3878 regs->version = 1; 3924 regs->version = 1;
3879 3925
3880 for (b = 0; b < 128; b++) { 3926 for (b = 0; b < 128; b++) {
3881 /* This complicated switch statement is to make sure and 3927 /* skip poisonous diagnostic ram region in block 3 */
3882 * only access regions that are unreserved. 3928 if (b == 3)
3883 * Some blocks are only valid on dual port cards.
3884 * and block 3 has some special diagnostic registers that
3885 * are poison.
3886 */
3887 switch (b) {
3888 case 3:
3889 /* skip diagnostic ram region */
3890 memcpy_fromio(p + 0x10, io + 0x10, 128 - 0x10); 3929 memcpy_fromio(p + 0x10, io + 0x10, 128 - 0x10);
3891 break; 3930 else if (sky2_reg_access_ok(sky2->hw, b))
3892
3893 /* dual port cards only */
3894 case 5: /* Tx Arbiter 2 */
3895 case 9: /* RX2 */
3896 case 14 ... 15: /* TX2 */
3897 case 17: case 19: /* Ram Buffer 2 */
3898 case 22 ... 23: /* Tx Ram Buffer 2 */
3899 case 25: /* Rx MAC Fifo 1 */
3900 case 27: /* Tx MAC Fifo 2 */
3901 case 31: /* GPHY 2 */
3902 case 40 ... 47: /* Pattern Ram 2 */
3903 case 52: case 54: /* TCP Segmentation 2 */
3904 case 112 ... 116: /* GMAC 2 */
3905 if (sky2->hw->ports == 1)
3906 goto reserved;
3907 /* fall through */
3908 case 0: /* Control */
3909 case 2: /* Mac address */
3910 case 4: /* Tx Arbiter 1 */
3911 case 7: /* PCI express reg */
3912 case 8: /* RX1 */
3913 case 12 ... 13: /* TX1 */
3914 case 16: case 18:/* Rx Ram Buffer 1 */
3915 case 20 ... 21: /* Tx Ram Buffer 1 */
3916 case 24: /* Rx MAC Fifo 1 */
3917 case 26: /* Tx MAC Fifo 1 */
3918 case 28 ... 29: /* Descriptor and status unit */
3919 case 30: /* GPHY 1*/
3920 case 32 ... 39: /* Pattern Ram 1 */
3921 case 48: case 50: /* TCP Segmentation 1 */
3922 case 56 ... 60: /* PCI space */
3923 case 80 ... 84: /* GMAC 1 */
3924 memcpy_fromio(p, io, 128); 3931 memcpy_fromio(p, io, 128);
3925 break; 3932 else
3926 default:
3927reserved:
3928 memset(p, 0, 128); 3933 memset(p, 0, 128);
3929 }
3930 3934
3931 p += 128; 3935 p += 128;
3932 io += 128; 3936 io += 128;
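After the refactor, sky2_get_regs() walks the 16 KB register window in 128-byte blocks and asks sky2_reg_access_ok() once per block. A hedged, userspace-only sketch of that loop shape, with memcpy() standing in for memcpy_fromio() and a made-up predicate in place of the real block map:

#include <stdint.h>
#include <string.h>

#define BLOCK_SZ 128u
#define NBLOCKS  128u

/* Stand-in for sky2_reg_access_ok(); the real map depends on hw->ports. */
static int reg_access_ok(unsigned int b)
{
        return b < 4;
}

static void dump_regs(uint8_t *dst, const uint8_t *io)
{
        unsigned int b;

        for (b = 0; b < NBLOCKS; b++) {
                if (b == 3)
                        /* skip the diagnostic RAM at the start of block 3;
                         * the first 0x10 bytes are assumed pre-zeroed */
                        memcpy(dst + 0x10, io + 0x10, BLOCK_SZ - 0x10);
                else if (reg_access_ok(b))
                        memcpy(dst, io, BLOCK_SZ);
                else
                        memset(dst, 0, BLOCK_SZ);

                dst += BLOCK_SZ;
                io += BLOCK_SZ;
        }
}

int main(void)
{
        static uint8_t fake_io[NBLOCKS * BLOCK_SZ];
        static uint8_t out[NBLOCKS * BLOCK_SZ];

        dump_regs(out, fake_io);
        return 0;
}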
diff --git a/drivers/net/sky2.h b/drivers/net/sky2.h
index 365d79c7d83..54cb303443e 100644
--- a/drivers/net/sky2.h
+++ b/drivers/net/sky2.h
@@ -2156,7 +2156,7 @@ struct tx_ring_info {
2156 struct sk_buff *skb; 2156 struct sk_buff *skb;
2157 unsigned long flags; 2157 unsigned long flags;
2158#define TX_MAP_SINGLE 0x0001 2158#define TX_MAP_SINGLE 0x0001
2159#define TX_MAP_PAGE 000002 2159#define TX_MAP_PAGE 0x0002
2160 DECLARE_PCI_UNMAP_ADDR(mapaddr); 2160 DECLARE_PCI_UNMAP_ADDR(mapaddr);
2161 DECLARE_PCI_UNMAP_LEN(maplen); 2161 DECLARE_PCI_UNMAP_LEN(maplen);
2162}; 2162};
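The TX_MAP_PAGE change above swaps the literal 000002 for 0x0002. A leading zero makes a C integer constant octal, and octal 2 is still 2, so the flag's value was already correct; the fix is about spelling the mask in the same hex style as TX_MAP_SINGLE before a less forgiving value (say, an intended 0x0010 typed as 000010, which is 8) sneaks in. A tiny self-contained check:

#include <stdio.h>

#define TX_MAP_SINGLE     0x0001
#define TX_MAP_PAGE_OCTAL 000002   /* leading zero: octal constant, value 2 */
#define TX_MAP_PAGE_HEX   0x0002

int main(void)
{
        printf("octal spelling = %d, hex spelling = %d, equal = %s\n",
               TX_MAP_PAGE_OCTAL, TX_MAP_PAGE_HEX,
               TX_MAP_PAGE_OCTAL == TX_MAP_PAGE_HEX ? "yes" : "no");
        printf("but 000010 = %d, while 0x0010 = %d\n", 000010, 0x0010);
        return 0;
}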
diff --git a/drivers/net/smc911x.c b/drivers/net/smc911x.c
index 44ebbaa7457..3c5a4f52345 100644
--- a/drivers/net/smc911x.c
+++ b/drivers/net/smc911x.c
@@ -2017,10 +2017,8 @@ static int __devinit smc911x_probe(struct net_device *dev)
2017 "set using ifconfig\n", dev->name); 2017 "set using ifconfig\n", dev->name);
2018 } else { 2018 } else {
2019 /* Print the Ethernet address */ 2019 /* Print the Ethernet address */
2020 printk("%s: Ethernet addr: ", dev->name); 2020 printk("%s: Ethernet addr: %pM\n",
2021 for (i = 0; i < 5; i++) 2021 dev->name, dev->dev_addr);
2022 printk("%2.2x:", dev->dev_addr[i]);
2023 printk("%2.2x\n", dev->dev_addr[5]);
2024 } 2022 }
2025 2023
2026 if (lp->phy_type == 0) { 2024 if (lp->phy_type == 0) {
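The smc911x hunk replaces the byte-by-byte printk loop with the kernel's %pM extension, which vsprintf expands to the usual lowercase, colon-separated MAC notation. A rough userspace equivalent of what that single call prints (plain snprintf, since the %p extensions only exist inside the kernel):

#include <stdio.h>

/* Format a 6-byte MAC the way the kernel's %pM does: "aa:bb:cc:dd:ee:ff". */
static void format_mac(char *buf, size_t len, const unsigned char *addr)
{
        snprintf(buf, len, "%02x:%02x:%02x:%02x:%02x:%02x",
                 addr[0], addr[1], addr[2], addr[3], addr[4], addr[5]);
}

int main(void)
{
        const unsigned char mac[6] = { 0x00, 0x16, 0x3e, 0x12, 0x34, 0x56 };
        char buf[18];    /* 17 characters plus the terminating NUL */

        format_mac(buf, sizeof(buf), mac);
        printf("Ethernet addr: %s\n", buf);
        return 0;
}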
diff --git a/drivers/net/smsc9420.c b/drivers/net/smsc9420.c
index 12f0f5d74e3..1495a5dd4b4 100644
--- a/drivers/net/smsc9420.c
+++ b/drivers/net/smsc9420.c
@@ -80,7 +80,7 @@ struct smsc9420_pdata {
80 int last_carrier; 80 int last_carrier;
81}; 81};
82 82
83static const struct pci_device_id smsc9420_id_table[] = { 83static DEFINE_PCI_DEVICE_TABLE(smsc9420_id_table) = {
84 { PCI_VENDOR_ID_9420, PCI_DEVICE_ID_9420, PCI_ANY_ID, PCI_ANY_ID, }, 84 { PCI_VENDOR_ID_9420, PCI_DEVICE_ID_9420, PCI_ANY_ID, PCI_ANY_ID, },
85 { 0, } 85 { 0, }
86}; 86};
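smsc9420 (and, further down, spider_net and starfire) switch their ID arrays to DEFINE_PCI_DEVICE_TABLE. In kernels of this vintage the macro amounted to roughly the expansion below, i.e. a const table annotated for the __devinitconst section; the exact definition is quoted from memory of <linux/pci.h>, so treat it as approximate:

/* Approximate expansion of the helper used above: */
#define DEFINE_PCI_DEVICE_TABLE(_table) \
        const struct pci_device_id _table[] __devinitconst

/* With that, the smsc9420 table would be equivalent to writing
 * (illustrative name, not the driver's symbol): */
static const struct pci_device_id smsc9420_id_table_equiv[] __devinitconst = {
        { PCI_VENDOR_ID_9420, PCI_DEVICE_ID_9420, PCI_ANY_ID, PCI_ANY_ID, },
        { 0, }
};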
diff --git a/drivers/net/spider_net.c b/drivers/net/spider_net.c
index 218524857bf..16191998ac6 100644
--- a/drivers/net/spider_net.c
+++ b/drivers/net/spider_net.c
@@ -72,7 +72,7 @@ MODULE_PARM_DESC(tx_descriptors, "number of descriptors used " \
72 72
73char spider_net_driver_name[] = "spidernet"; 73char spider_net_driver_name[] = "spidernet";
74 74
75static struct pci_device_id spider_net_pci_tbl[] = { 75static DEFINE_PCI_DEVICE_TABLE(spider_net_pci_tbl) = {
76 { PCI_VENDOR_ID_TOSHIBA_2, PCI_DEVICE_ID_TOSHIBA_SPIDER_NET, 76 { PCI_VENDOR_ID_TOSHIBA_2, PCI_DEVICE_ID_TOSHIBA_SPIDER_NET,
77 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, 77 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
78 { 0, } 78 { 0, }
diff --git a/drivers/net/starfire.c b/drivers/net/starfire.c
index f9521136a86..d0556a9b456 100644
--- a/drivers/net/starfire.c
+++ b/drivers/net/starfire.c
@@ -301,7 +301,7 @@ enum chipset {
301 CH_6915 = 0, 301 CH_6915 = 0,
302}; 302};
303 303
304static struct pci_device_id starfire_pci_tbl[] = { 304static DEFINE_PCI_DEVICE_TABLE(starfire_pci_tbl) = {
305 { 0x9004, 0x6915, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_6915 }, 305 { 0x9004, 0x6915, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_6915 },
306 { 0, } 306 { 0, }
307}; 307};
diff --git a/drivers/net/stmmac/Kconfig b/drivers/net/stmmac/Kconfig
index 35eaa5251d7..fb287649a30 100644
--- a/drivers/net/stmmac/Kconfig
+++ b/drivers/net/stmmac/Kconfig
@@ -4,8 +4,9 @@ config STMMAC_ETH
4 select PHYLIB 4 select PHYLIB
5 depends on NETDEVICES && CPU_SUBTYPE_ST40 5 depends on NETDEVICES && CPU_SUBTYPE_ST40
6 help 6 help
7 This is the driver for the ST MAC 10/100/1000 on-chip Ethernet 7 This is the driver for the Ethernet IPs are built around a
8 controllers. ST Ethernet IPs are built around a Synopsys IP Core. 8 Synopsys IP Core and fully tested on the STMicroelectronics
9 platforms.
9 10
10if STMMAC_ETH 11if STMMAC_ETH
11 12
@@ -32,7 +33,8 @@ config STMMAC_TIMER
32 default n 33 default n
33 help 34 help
34 Use an external timer for mitigating the number of network 35 Use an external timer for mitigating the number of network
35 interrupts. 36 interrupts. Currently, for SH architectures, it is possible
37 to use the TMU channel 2 and the SH-RTC device.
36 38
37choice 39choice
38 prompt "Select Timer device" 40 prompt "Select Timer device"
diff --git a/drivers/net/stmmac/Makefile b/drivers/net/stmmac/Makefile
index b2d7a5564df..c776af15fe1 100644
--- a/drivers/net/stmmac/Makefile
+++ b/drivers/net/stmmac/Makefile
@@ -1,4 +1,5 @@
1obj-$(CONFIG_STMMAC_ETH) += stmmac.o 1obj-$(CONFIG_STMMAC_ETH) += stmmac.o
2stmmac-$(CONFIG_STMMAC_TIMER) += stmmac_timer.o 2stmmac-$(CONFIG_STMMAC_TIMER) += stmmac_timer.o
3stmmac-objs:= stmmac_main.o stmmac_ethtool.o stmmac_mdio.o \ 3stmmac-objs:= stmmac_main.o stmmac_ethtool.o stmmac_mdio.o \
4 mac100.o gmac.o $(stmmac-y) 4 dwmac_lib.o dwmac1000_core.o dwmac1000_dma.o \
5 dwmac100.o $(stmmac-y)
diff --git a/drivers/net/stmmac/common.h b/drivers/net/stmmac/common.h
index e49e5188e88..7267bcd43d0 100644
--- a/drivers/net/stmmac/common.h
+++ b/drivers/net/stmmac/common.h
@@ -23,132 +23,7 @@
23*******************************************************************************/ 23*******************************************************************************/
24 24
25#include "descs.h" 25#include "descs.h"
26#include <linux/io.h> 26#include <linux/netdevice.h>
27
28/* *********************************************
29 DMA CRS Control and Status Register Mapping
30 * *********************************************/
31#define DMA_BUS_MODE 0x00001000 /* Bus Mode */
32#define DMA_XMT_POLL_DEMAND 0x00001004 /* Transmit Poll Demand */
33#define DMA_RCV_POLL_DEMAND 0x00001008 /* Received Poll Demand */
34#define DMA_RCV_BASE_ADDR 0x0000100c /* Receive List Base */
35#define DMA_TX_BASE_ADDR 0x00001010 /* Transmit List Base */
36#define DMA_STATUS 0x00001014 /* Status Register */
37#define DMA_CONTROL 0x00001018 /* Ctrl (Operational Mode) */
38#define DMA_INTR_ENA 0x0000101c /* Interrupt Enable */
39#define DMA_MISSED_FRAME_CTR 0x00001020 /* Missed Frame Counter */
40#define DMA_CUR_TX_BUF_ADDR 0x00001050 /* Current Host Tx Buffer */
41#define DMA_CUR_RX_BUF_ADDR 0x00001054 /* Current Host Rx Buffer */
42
43/* ********************************
44 DMA Control register defines
45 * ********************************/
46#define DMA_CONTROL_ST 0x00002000 /* Start/Stop Transmission */
47#define DMA_CONTROL_SR 0x00000002 /* Start/Stop Receive */
48
49/* **************************************
50 DMA Interrupt Enable register defines
51 * **************************************/
52/**** NORMAL INTERRUPT ****/
53#define DMA_INTR_ENA_NIE 0x00010000 /* Normal Summary */
54#define DMA_INTR_ENA_TIE 0x00000001 /* Transmit Interrupt */
55#define DMA_INTR_ENA_TUE 0x00000004 /* Transmit Buffer Unavailable */
56#define DMA_INTR_ENA_RIE 0x00000040 /* Receive Interrupt */
57#define DMA_INTR_ENA_ERE 0x00004000 /* Early Receive */
58
59#define DMA_INTR_NORMAL (DMA_INTR_ENA_NIE | DMA_INTR_ENA_RIE | \
60 DMA_INTR_ENA_TIE)
61
62/**** ABNORMAL INTERRUPT ****/
63#define DMA_INTR_ENA_AIE 0x00008000 /* Abnormal Summary */
64#define DMA_INTR_ENA_FBE 0x00002000 /* Fatal Bus Error */
65#define DMA_INTR_ENA_ETE 0x00000400 /* Early Transmit */
66#define DMA_INTR_ENA_RWE 0x00000200 /* Receive Watchdog */
67#define DMA_INTR_ENA_RSE 0x00000100 /* Receive Stopped */
68#define DMA_INTR_ENA_RUE 0x00000080 /* Receive Buffer Unavailable */
69#define DMA_INTR_ENA_UNE 0x00000020 /* Tx Underflow */
70#define DMA_INTR_ENA_OVE 0x00000010 /* Receive Overflow */
71#define DMA_INTR_ENA_TJE 0x00000008 /* Transmit Jabber */
72#define DMA_INTR_ENA_TSE 0x00000002 /* Transmit Stopped */
73
74#define DMA_INTR_ABNORMAL (DMA_INTR_ENA_AIE | DMA_INTR_ENA_FBE | \
75 DMA_INTR_ENA_UNE)
76
77/* DMA default interrupt mask */
78#define DMA_INTR_DEFAULT_MASK (DMA_INTR_NORMAL | DMA_INTR_ABNORMAL)
79
80/* ****************************
81 * DMA Status register defines
82 * ****************************/
83#define DMA_STATUS_GPI 0x10000000 /* PMT interrupt */
84#define DMA_STATUS_GMI 0x08000000 /* MMC interrupt */
85#define DMA_STATUS_GLI 0x04000000 /* GMAC Line interface int. */
86#define DMA_STATUS_GMI 0x08000000
87#define DMA_STATUS_GLI 0x04000000
88#define DMA_STATUS_EB_MASK 0x00380000 /* Error Bits Mask */
89#define DMA_STATUS_EB_TX_ABORT 0x00080000 /* Error Bits - TX Abort */
90#define DMA_STATUS_EB_RX_ABORT 0x00100000 /* Error Bits - RX Abort */
91#define DMA_STATUS_TS_MASK 0x00700000 /* Transmit Process State */
92#define DMA_STATUS_TS_SHIFT 20
93#define DMA_STATUS_RS_MASK 0x000e0000 /* Receive Process State */
94#define DMA_STATUS_RS_SHIFT 17
95#define DMA_STATUS_NIS 0x00010000 /* Normal Interrupt Summary */
96#define DMA_STATUS_AIS 0x00008000 /* Abnormal Interrupt Summary */
97#define DMA_STATUS_ERI 0x00004000 /* Early Receive Interrupt */
98#define DMA_STATUS_FBI 0x00002000 /* Fatal Bus Error Interrupt */
99#define DMA_STATUS_ETI 0x00000400 /* Early Transmit Interrupt */
100#define DMA_STATUS_RWT 0x00000200 /* Receive Watchdog Timeout */
101#define DMA_STATUS_RPS 0x00000100 /* Receive Process Stopped */
102#define DMA_STATUS_RU 0x00000080 /* Receive Buffer Unavailable */
103#define DMA_STATUS_RI 0x00000040 /* Receive Interrupt */
104#define DMA_STATUS_UNF 0x00000020 /* Transmit Underflow */
105#define DMA_STATUS_OVF 0x00000010 /* Receive Overflow */
106#define DMA_STATUS_TJT 0x00000008 /* Transmit Jabber Timeout */
107#define DMA_STATUS_TU 0x00000004 /* Transmit Buffer Unavailable */
108#define DMA_STATUS_TPS 0x00000002 /* Transmit Process Stopped */
109#define DMA_STATUS_TI 0x00000001 /* Transmit Interrupt */
110
111/* Other defines */
112#define HASH_TABLE_SIZE 64
113#define PAUSE_TIME 0x200
114
115/* Flow Control defines */
116#define FLOW_OFF 0
117#define FLOW_RX 1
118#define FLOW_TX 2
119#define FLOW_AUTO (FLOW_TX | FLOW_RX)
120
121/* DMA STORE-AND-FORWARD Operation Mode */
122#define SF_DMA_MODE 1
123
124#define HW_CSUM 1
125#define NO_HW_CSUM 0
126
127/* GMAC TX FIFO is 8K, Rx FIFO is 16K */
128#define BUF_SIZE_16KiB 16384
129#define BUF_SIZE_8KiB 8192
130#define BUF_SIZE_4KiB 4096
131#define BUF_SIZE_2KiB 2048
132
133/* Power Down and WOL */
134#define PMT_NOT_SUPPORTED 0
135#define PMT_SUPPORTED 1
136
137/* Common MAC defines */
138#define MAC_CTRL_REG 0x00000000 /* MAC Control */
139#define MAC_ENABLE_TX 0x00000008 /* Transmitter Enable */
140#define MAC_RNABLE_RX 0x00000004 /* Receiver Enable */
141
142/* MAC Management Counters register */
143#define MMC_CONTROL 0x00000100 /* MMC Control */
144#define MMC_HIGH_INTR 0x00000104 /* MMC High Interrupt */
145#define MMC_LOW_INTR 0x00000108 /* MMC Low Interrupt */
146#define MMC_HIGH_INTR_MASK 0x0000010c /* MMC High Interrupt Mask */
147#define MMC_LOW_INTR_MASK 0x00000110 /* MMC Low Interrupt Mask */
148
149#define MMC_CONTROL_MAX_FRM_MASK 0x0003ff8 /* Maximum Frame Size */
150#define MMC_CONTROL_MAX_FRM_SHIFT 3
151#define MMC_CONTROL_MAX_FRAME 0x7FF
152 27
153struct stmmac_extra_stats { 28struct stmmac_extra_stats {
154 /* Transmit errors */ 29 /* Transmit errors */
@@ -198,66 +73,62 @@ struct stmmac_extra_stats {
198 unsigned long normal_irq_n; 73 unsigned long normal_irq_n;
199}; 74};
200 75
201/* GMAC core can compute the checksums in HW. */ 76#define HASH_TABLE_SIZE 64
202enum rx_frame_status { 77#define PAUSE_TIME 0x200
78
79/* Flow Control defines */
80#define FLOW_OFF 0
81#define FLOW_RX 1
82#define FLOW_TX 2
83#define FLOW_AUTO (FLOW_TX | FLOW_RX)
84
85#define SF_DMA_MODE 1 /* DMA STORE-AND-FORWARD Operation Mode */
86
87#define HW_CSUM 1
88#define NO_HW_CSUM 0
89enum rx_frame_status { /* IPC status */
203 good_frame = 0, 90 good_frame = 0,
204 discard_frame = 1, 91 discard_frame = 1,
205 csum_none = 2, 92 csum_none = 2,
206}; 93};
207 94
208static inline void stmmac_set_mac_addr(unsigned long ioaddr, u8 addr[6], 95enum tx_dma_irq_status {
209 unsigned int high, unsigned int low) 96 tx_hard_error = 1,
210{ 97 tx_hard_error_bump_tc = 2,
211 unsigned long data; 98 handle_tx_rx = 3,
212 99};
213 data = (addr[5] << 8) | addr[4];
214 writel(data, ioaddr + high);
215 data = (addr[3] << 24) | (addr[2] << 16) | (addr[1] << 8) | addr[0];
216 writel(data, ioaddr + low);
217 100
218 return; 101/* GMAC TX FIFO is 8K, Rx FIFO is 16K */
219} 102#define BUF_SIZE_16KiB 16384
103#define BUF_SIZE_8KiB 8192
104#define BUF_SIZE_4KiB 4096
105#define BUF_SIZE_2KiB 2048
220 106
221static inline void stmmac_get_mac_addr(unsigned long ioaddr, 107/* Power Down and WOL */
222 unsigned char *addr, unsigned int high, 108#define PMT_NOT_SUPPORTED 0
223 unsigned int low) 109#define PMT_SUPPORTED 1
224{
225 unsigned int hi_addr, lo_addr;
226 110
227 /* Read the MAC address from the hardware */ 111/* Common MAC defines */
228 hi_addr = readl(ioaddr + high); 112#define MAC_CTRL_REG 0x00000000 /* MAC Control */
229 lo_addr = readl(ioaddr + low); 113#define MAC_ENABLE_TX 0x00000008 /* Transmitter Enable */
114#define MAC_RNABLE_RX 0x00000004 /* Receiver Enable */
230 115
231 /* Extract the MAC address from the high and low words */ 116/* MAC Management Counters register */
232 addr[0] = lo_addr & 0xff; 117#define MMC_CONTROL 0x00000100 /* MMC Control */
233 addr[1] = (lo_addr >> 8) & 0xff; 118#define MMC_HIGH_INTR 0x00000104 /* MMC High Interrupt */
234 addr[2] = (lo_addr >> 16) & 0xff; 119#define MMC_LOW_INTR 0x00000108 /* MMC Low Interrupt */
235 addr[3] = (lo_addr >> 24) & 0xff; 120#define MMC_HIGH_INTR_MASK 0x0000010c /* MMC High Interrupt Mask */
236 addr[4] = hi_addr & 0xff; 121#define MMC_LOW_INTR_MASK 0x00000110 /* MMC Low Interrupt Mask */
237 addr[5] = (hi_addr >> 8) & 0xff;
238 122
239 return; 123#define MMC_CONTROL_MAX_FRM_MASK 0x0003ff8 /* Maximum Frame Size */
240} 124#define MMC_CONTROL_MAX_FRM_SHIFT 3
125#define MMC_CONTROL_MAX_FRAME 0x7FF
241 126
242struct stmmac_ops { 127struct stmmac_desc_ops {
243 /* MAC core initialization */ 128 /* DMA RX descriptor ring initialization */
244 void (*core_init) (unsigned long ioaddr) ____cacheline_aligned;
245 /* DMA core initialization */
246 int (*dma_init) (unsigned long ioaddr, int pbl, u32 dma_tx, u32 dma_rx);
247 /* Dump MAC registers */
248 void (*dump_mac_regs) (unsigned long ioaddr);
249 /* Dump DMA registers */
250 void (*dump_dma_regs) (unsigned long ioaddr);
251 /* Set tx/rx threshold in the csr6 register
252 * An invalid value enables the store-and-forward mode */
253 void (*dma_mode) (unsigned long ioaddr, int txmode, int rxmode);
254 /* To track extra statistic (if supported) */
255 void (*dma_diagnostic_fr) (void *data, struct stmmac_extra_stats *x,
256 unsigned long ioaddr);
257 /* RX descriptor ring initialization */
258 void (*init_rx_desc) (struct dma_desc *p, unsigned int ring_size, 129 void (*init_rx_desc) (struct dma_desc *p, unsigned int ring_size,
259 int disable_rx_ic); 130 int disable_rx_ic);
260 /* TX descriptor ring initialization */ 131 /* DMA TX descriptor ring initialization */
261 void (*init_tx_desc) (struct dma_desc *p, unsigned int ring_size); 132 void (*init_tx_desc) (struct dma_desc *p, unsigned int ring_size);
262 133
263 /* Invoked by the xmit function to prepare the tx descriptor */ 134 /* Invoked by the xmit function to prepare the tx descriptor */
@@ -281,7 +152,6 @@ struct stmmac_ops {
281 /* Get the buffer size from the descriptor */ 152 /* Get the buffer size from the descriptor */
282 int (*get_tx_len) (struct dma_desc *p); 153 int (*get_tx_len) (struct dma_desc *p);
283 /* Handle extra events on specific interrupts hw dependent */ 154 /* Handle extra events on specific interrupts hw dependent */
284 void (*host_irq_status) (unsigned long ioaddr);
285 int (*get_rx_owner) (struct dma_desc *p); 155 int (*get_rx_owner) (struct dma_desc *p);
286 void (*set_rx_owner) (struct dma_desc *p); 156 void (*set_rx_owner) (struct dma_desc *p);
287 /* Get the receive frame size */ 157 /* Get the receive frame size */
@@ -289,6 +159,37 @@ struct stmmac_ops {
289 /* Return the reception status looking at the RDES1 */ 159 /* Return the reception status looking at the RDES1 */
290 int (*rx_status) (void *data, struct stmmac_extra_stats *x, 160 int (*rx_status) (void *data, struct stmmac_extra_stats *x,
291 struct dma_desc *p); 161 struct dma_desc *p);
162};
163
164struct stmmac_dma_ops {
165 /* DMA core initialization */
166 int (*init) (unsigned long ioaddr, int pbl, u32 dma_tx, u32 dma_rx);
167 /* Dump DMA registers */
168 void (*dump_regs) (unsigned long ioaddr);
169 /* Set tx/rx threshold in the csr6 register
170 * An invalid value enables the store-and-forward mode */
171 void (*dma_mode) (unsigned long ioaddr, int txmode, int rxmode);
172 /* To track extra statistic (if supported) */
173 void (*dma_diagnostic_fr) (void *data, struct stmmac_extra_stats *x,
174 unsigned long ioaddr);
175 void (*enable_dma_transmission) (unsigned long ioaddr);
176 void (*enable_dma_irq) (unsigned long ioaddr);
177 void (*disable_dma_irq) (unsigned long ioaddr);
178 void (*start_tx) (unsigned long ioaddr);
179 void (*stop_tx) (unsigned long ioaddr);
180 void (*start_rx) (unsigned long ioaddr);
181 void (*stop_rx) (unsigned long ioaddr);
182 int (*dma_interrupt) (unsigned long ioaddr,
183 struct stmmac_extra_stats *x);
184};
185
186struct stmmac_ops {
187 /* MAC core initialization */
188 void (*core_init) (unsigned long ioaddr) ____cacheline_aligned;
189 /* Dump MAC registers */
190 void (*dump_regs) (unsigned long ioaddr);
191 /* Handle extra events on specific interrupts hw dependent */
192 void (*host_irq_status) (unsigned long ioaddr);
292 /* Multicast filter setting */ 193 /* Multicast filter setting */
293 void (*set_filter) (struct net_device *dev); 194 void (*set_filter) (struct net_device *dev);
294 /* Flow control setting */ 195 /* Flow control setting */
@@ -298,9 +199,9 @@ struct stmmac_ops {
298 void (*pmt) (unsigned long ioaddr, unsigned long mode); 199 void (*pmt) (unsigned long ioaddr, unsigned long mode);
299 /* Set/Get Unicast MAC addresses */ 200 /* Set/Get Unicast MAC addresses */
300 void (*set_umac_addr) (unsigned long ioaddr, unsigned char *addr, 201 void (*set_umac_addr) (unsigned long ioaddr, unsigned char *addr,
301 unsigned int reg_n); 202 unsigned int reg_n);
302 void (*get_umac_addr) (unsigned long ioaddr, unsigned char *addr, 203 void (*get_umac_addr) (unsigned long ioaddr, unsigned char *addr,
303 unsigned int reg_n); 204 unsigned int reg_n);
304}; 205};
305 206
306struct mac_link { 207struct mac_link {
@@ -314,17 +215,19 @@ struct mii_regs {
314 unsigned int data; /* MII Data */ 215 unsigned int data; /* MII Data */
315}; 216};
316 217
317struct hw_cap { 218struct mac_device_info {
318 unsigned int version; /* Core Version register (GMAC) */ 219 struct stmmac_ops *mac;
319 unsigned int pmt; /* Power-Down mode (GMAC) */ 220 struct stmmac_desc_ops *desc;
221 struct stmmac_dma_ops *dma;
222 unsigned int pmt; /* support Power-Down */
223 struct mii_regs mii; /* MII register Addresses */
320 struct mac_link link; 224 struct mac_link link;
321 struct mii_regs mii;
322}; 225};
323 226
324struct mac_device_info { 227struct mac_device_info *dwmac1000_setup(unsigned long addr);
325 struct hw_cap hw; 228struct mac_device_info *dwmac100_setup(unsigned long addr);
326 struct stmmac_ops *ops;
327};
328 229
329struct mac_device_info *gmac_setup(unsigned long addr); 230extern void stmmac_set_mac_addr(unsigned long ioaddr, u8 addr[6],
330struct mac_device_info *mac100_setup(unsigned long addr); 231 unsigned int high, unsigned int low);
232extern void stmmac_get_mac_addr(unsigned long ioaddr, unsigned char *addr,
233 unsigned int high, unsigned int low);
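The common.h rework above breaks the old monolithic stmmac_ops into three tables: stmmac_ops for the MAC core, stmmac_desc_ops for descriptor handling and stmmac_dma_ops for the DMA engine, all reachable from mac_device_info, so dwmac100 and dwmac1000 can share the generic DMA helpers while keeping core-specific code apart. A hedged sketch of how the driver core might go through the new indirection; the stmmac_priv fields used here (hw, pbl, ring pointers and sizes) are assumed for illustration and are not part of this hunk:

/* Sketch only: not a literal copy of stmmac_main.c. */
static void example_hw_setup(struct stmmac_priv *priv, unsigned long ioaddr)
{
        struct mac_device_info *hw = priv->hw;

        hw->mac->core_init(ioaddr);                     /* MAC core setup */
        hw->dma->init(ioaddr, priv->pbl,
                      priv->dma_tx_phy, priv->dma_rx_phy);

        hw->desc->init_rx_desc(priv->dma_rx, priv->dma_rx_size, 0);
        hw->desc->init_tx_desc(priv->dma_tx, priv->dma_tx_size);

        hw->dma->start_tx(ioaddr);
        hw->dma->start_rx(ioaddr);
}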
diff --git a/drivers/net/stmmac/descs.h b/drivers/net/stmmac/descs.h
index 6d2a0b2f5e5..63a03e26469 100644
--- a/drivers/net/stmmac/descs.h
+++ b/drivers/net/stmmac/descs.h
@@ -1,6 +1,6 @@
1/******************************************************************************* 1/*******************************************************************************
2 Header File to describe the DMA descriptors 2 Header File to describe the DMA descriptors.
 3 Use enhanced descriptors in case of GMAC Cores. 3 Enhanced descriptors have been used in case of DWMAC1000 Cores.
4 4
5 This program is free software; you can redistribute it and/or modify it 5 This program is free software; you can redistribute it and/or modify it
6 under the terms and conditions of the GNU General Public License, 6 under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/stmmac/mac100.c b/drivers/net/stmmac/dwmac100.c
index 625171b6062..82dde774d4c 100644
--- a/drivers/net/stmmac/mac100.c
+++ b/drivers/net/stmmac/dwmac100.c
@@ -26,23 +26,23 @@
26 Author: Giuseppe Cavallaro <peppe.cavallaro@st.com> 26 Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
27*******************************************************************************/ 27*******************************************************************************/
28 28
29#include <linux/netdevice.h>
30#include <linux/crc32.h> 29#include <linux/crc32.h>
31#include <linux/mii.h> 30#include <linux/mii.h>
32#include <linux/phy.h> 31#include <linux/phy.h>
33 32
34#include "common.h" 33#include "common.h"
35#include "mac100.h" 34#include "dwmac100.h"
35#include "dwmac_dma.h"
36 36
37#undef MAC100_DEBUG 37#undef DWMAC100_DEBUG
38/*#define MAC100_DEBUG*/ 38/*#define DWMAC100_DEBUG*/
39#ifdef MAC100_DEBUG 39#ifdef DWMAC100_DEBUG
40#define DBG(fmt, args...) printk(fmt, ## args) 40#define DBG(fmt, args...) printk(fmt, ## args)
41#else 41#else
42#define DBG(fmt, args...) do { } while (0) 42#define DBG(fmt, args...) do { } while (0)
43#endif 43#endif
44 44
45static void mac100_core_init(unsigned long ioaddr) 45static void dwmac100_core_init(unsigned long ioaddr)
46{ 46{
47 u32 value = readl(ioaddr + MAC_CONTROL); 47 u32 value = readl(ioaddr + MAC_CONTROL);
48 48
@@ -54,43 +54,43 @@ static void mac100_core_init(unsigned long ioaddr)
54 return; 54 return;
55} 55}
56 56
57static void mac100_dump_mac_regs(unsigned long ioaddr) 57static void dwmac100_dump_mac_regs(unsigned long ioaddr)
58{ 58{
59 pr_info("\t----------------------------------------------\n" 59 pr_info("\t----------------------------------------------\n"
60 "\t MAC100 CSR (base addr = 0x%8x)\n" 60 "\t DWMAC 100 CSR (base addr = 0x%8x)\n"
61 "\t----------------------------------------------\n", 61 "\t----------------------------------------------\n",
62 (unsigned int)ioaddr); 62 (unsigned int)ioaddr);
63 pr_info("\tcontrol reg (offset 0x%x): 0x%08x\n", MAC_CONTROL, 63 pr_info("\tcontrol reg (offset 0x%x): 0x%08x\n", MAC_CONTROL,
64 readl(ioaddr + MAC_CONTROL)); 64 readl(ioaddr + MAC_CONTROL));
65 pr_info("\taddr HI (offset 0x%x): 0x%08x\n ", MAC_ADDR_HIGH, 65 pr_info("\taddr HI (offset 0x%x): 0x%08x\n ", MAC_ADDR_HIGH,
66 readl(ioaddr + MAC_ADDR_HIGH)); 66 readl(ioaddr + MAC_ADDR_HIGH));
67 pr_info("\taddr LO (offset 0x%x): 0x%08x\n", MAC_ADDR_LOW, 67 pr_info("\taddr LO (offset 0x%x): 0x%08x\n", MAC_ADDR_LOW,
68 readl(ioaddr + MAC_ADDR_LOW)); 68 readl(ioaddr + MAC_ADDR_LOW));
69 pr_info("\tmulticast hash HI (offset 0x%x): 0x%08x\n", 69 pr_info("\tmulticast hash HI (offset 0x%x): 0x%08x\n",
70 MAC_HASH_HIGH, readl(ioaddr + MAC_HASH_HIGH)); 70 MAC_HASH_HIGH, readl(ioaddr + MAC_HASH_HIGH));
71 pr_info("\tmulticast hash LO (offset 0x%x): 0x%08x\n", 71 pr_info("\tmulticast hash LO (offset 0x%x): 0x%08x\n",
72 MAC_HASH_LOW, readl(ioaddr + MAC_HASH_LOW)); 72 MAC_HASH_LOW, readl(ioaddr + MAC_HASH_LOW));
73 pr_info("\tflow control (offset 0x%x): 0x%08x\n", 73 pr_info("\tflow control (offset 0x%x): 0x%08x\n",
74 MAC_FLOW_CTRL, readl(ioaddr + MAC_FLOW_CTRL)); 74 MAC_FLOW_CTRL, readl(ioaddr + MAC_FLOW_CTRL));
75 pr_info("\tVLAN1 tag (offset 0x%x): 0x%08x\n", MAC_VLAN1, 75 pr_info("\tVLAN1 tag (offset 0x%x): 0x%08x\n", MAC_VLAN1,
76 readl(ioaddr + MAC_VLAN1)); 76 readl(ioaddr + MAC_VLAN1));
77 pr_info("\tVLAN2 tag (offset 0x%x): 0x%08x\n", MAC_VLAN2, 77 pr_info("\tVLAN2 tag (offset 0x%x): 0x%08x\n", MAC_VLAN2,
78 readl(ioaddr + MAC_VLAN2)); 78 readl(ioaddr + MAC_VLAN2));
79 pr_info("\n\tMAC management counter registers\n"); 79 pr_info("\n\tMAC management counter registers\n");
80 pr_info("\t MMC crtl (offset 0x%x): 0x%08x\n", 80 pr_info("\t MMC crtl (offset 0x%x): 0x%08x\n",
81 MMC_CONTROL, readl(ioaddr + MMC_CONTROL)); 81 MMC_CONTROL, readl(ioaddr + MMC_CONTROL));
82 pr_info("\t MMC High Interrupt (offset 0x%x): 0x%08x\n", 82 pr_info("\t MMC High Interrupt (offset 0x%x): 0x%08x\n",
83 MMC_HIGH_INTR, readl(ioaddr + MMC_HIGH_INTR)); 83 MMC_HIGH_INTR, readl(ioaddr + MMC_HIGH_INTR));
84 pr_info("\t MMC Low Interrupt (offset 0x%x): 0x%08x\n", 84 pr_info("\t MMC Low Interrupt (offset 0x%x): 0x%08x\n",
85 MMC_LOW_INTR, readl(ioaddr + MMC_LOW_INTR)); 85 MMC_LOW_INTR, readl(ioaddr + MMC_LOW_INTR));
86 pr_info("\t MMC High Interrupt Mask (offset 0x%x): 0x%08x\n", 86 pr_info("\t MMC High Interrupt Mask (offset 0x%x): 0x%08x\n",
87 MMC_HIGH_INTR_MASK, readl(ioaddr + MMC_HIGH_INTR_MASK)); 87 MMC_HIGH_INTR_MASK, readl(ioaddr + MMC_HIGH_INTR_MASK));
88 pr_info("\t MMC Low Interrupt Mask (offset 0x%x): 0x%08x\n", 88 pr_info("\t MMC Low Interrupt Mask (offset 0x%x): 0x%08x\n",
89 MMC_LOW_INTR_MASK, readl(ioaddr + MMC_LOW_INTR_MASK)); 89 MMC_LOW_INTR_MASK, readl(ioaddr + MMC_LOW_INTR_MASK));
90 return; 90 return;
91} 91}
92 92
93static int mac100_dma_init(unsigned long ioaddr, int pbl, u32 dma_tx, 93static int dwmac100_dma_init(unsigned long ioaddr, int pbl, u32 dma_tx,
94 u32 dma_rx) 94 u32 dma_rx)
95{ 95{
96 u32 value = readl(ioaddr + DMA_BUS_MODE); 96 u32 value = readl(ioaddr + DMA_BUS_MODE);
@@ -117,7 +117,7 @@ static int mac100_dma_init(unsigned long ioaddr, int pbl, u32 dma_tx,
117/* Store and Forward capability is not used at all.. 117/* Store and Forward capability is not used at all..
118 * The transmit threshold can be programmed by 118 * The transmit threshold can be programmed by
119 * setting the TTC bits in the DMA control register.*/ 119 * setting the TTC bits in the DMA control register.*/
120static void mac100_dma_operation_mode(unsigned long ioaddr, int txmode, 120static void dwmac100_dma_operation_mode(unsigned long ioaddr, int txmode,
121 int rxmode) 121 int rxmode)
122{ 122{
123 u32 csr6 = readl(ioaddr + DMA_CONTROL); 123 u32 csr6 = readl(ioaddr + DMA_CONTROL);
@@ -134,11 +134,11 @@ static void mac100_dma_operation_mode(unsigned long ioaddr, int txmode,
134 return; 134 return;
135} 135}
136 136
137static void mac100_dump_dma_regs(unsigned long ioaddr) 137static void dwmac100_dump_dma_regs(unsigned long ioaddr)
138{ 138{
139 int i; 139 int i;
140 140
141 DBG(KERN_DEBUG "MAC100 DMA CSR \n"); 141 DBG(KERN_DEBUG "DWMAC 100 DMA CSR \n");
142 for (i = 0; i < 9; i++) 142 for (i = 0; i < 9; i++)
143 pr_debug("\t CSR%d (offset 0x%x): 0x%08x\n", i, 143 pr_debug("\t CSR%d (offset 0x%x): 0x%08x\n", i,
144 (DMA_BUS_MODE + i * 4), 144 (DMA_BUS_MODE + i * 4),
@@ -151,8 +151,9 @@ static void mac100_dump_dma_regs(unsigned long ioaddr)
151} 151}
152 152
153/* DMA controller has two counters to track the number of 153/* DMA controller has two counters to track the number of
154 the receive missed frames. */ 154 * the receive missed frames. */
155static void mac100_dma_diagnostic_fr(void *data, struct stmmac_extra_stats *x, 155static void dwmac100_dma_diagnostic_fr(void *data,
156 struct stmmac_extra_stats *x,
156 unsigned long ioaddr) 157 unsigned long ioaddr)
157{ 158{
158 struct net_device_stats *stats = (struct net_device_stats *)data; 159 struct net_device_stats *stats = (struct net_device_stats *)data;
@@ -181,7 +182,8 @@ static void mac100_dma_diagnostic_fr(void *data, struct stmmac_extra_stats *x,
181 return; 182 return;
182} 183}
183 184
184static int mac100_get_tx_frame_status(void *data, struct stmmac_extra_stats *x, 185static int dwmac100_get_tx_frame_status(void *data,
186 struct stmmac_extra_stats *x,
185 struct dma_desc *p, unsigned long ioaddr) 187 struct dma_desc *p, unsigned long ioaddr)
186{ 188{
187 int ret = 0; 189 int ret = 0;
@@ -217,7 +219,7 @@ static int mac100_get_tx_frame_status(void *data, struct stmmac_extra_stats *x,
217 return ret; 219 return ret;
218} 220}
219 221
220static int mac100_get_tx_len(struct dma_desc *p) 222static int dwmac100_get_tx_len(struct dma_desc *p)
221{ 223{
222 return p->des01.tx.buffer1_size; 224 return p->des01.tx.buffer1_size;
223} 225}
@@ -226,14 +228,15 @@ static int mac100_get_tx_len(struct dma_desc *p)
226 * and, if required, updates the multicast statistics. 228 * and, if required, updates the multicast statistics.
 227 * In case of success, it returns csum_none because the device 229
228 * is not able to compute the csum in HW. */ 230 * is not able to compute the csum in HW. */
229static int mac100_get_rx_frame_status(void *data, struct stmmac_extra_stats *x, 231static int dwmac100_get_rx_frame_status(void *data,
232 struct stmmac_extra_stats *x,
230 struct dma_desc *p) 233 struct dma_desc *p)
231{ 234{
232 int ret = csum_none; 235 int ret = csum_none;
233 struct net_device_stats *stats = (struct net_device_stats *)data; 236 struct net_device_stats *stats = (struct net_device_stats *)data;
234 237
235 if (unlikely(p->des01.rx.last_descriptor == 0)) { 238 if (unlikely(p->des01.rx.last_descriptor == 0)) {
236 pr_warning("mac100 Error: Oversized Ethernet " 239 pr_warning("dwmac100 Error: Oversized Ethernet "
237 "frame spanned multiple buffers\n"); 240 "frame spanned multiple buffers\n");
238 stats->rx_length_errors++; 241 stats->rx_length_errors++;
239 return discard_frame; 242 return discard_frame;
@@ -276,24 +279,24 @@ static int mac100_get_rx_frame_status(void *data, struct stmmac_extra_stats *x,
276 return ret; 279 return ret;
277} 280}
278 281
279static void mac100_irq_status(unsigned long ioaddr) 282static void dwmac100_irq_status(unsigned long ioaddr)
280{ 283{
281 return; 284 return;
282} 285}
283 286
284static void mac100_set_umac_addr(unsigned long ioaddr, unsigned char *addr, 287static void dwmac100_set_umac_addr(unsigned long ioaddr, unsigned char *addr,
285 unsigned int reg_n) 288 unsigned int reg_n)
286{ 289{
287 stmmac_set_mac_addr(ioaddr, addr, MAC_ADDR_HIGH, MAC_ADDR_LOW); 290 stmmac_set_mac_addr(ioaddr, addr, MAC_ADDR_HIGH, MAC_ADDR_LOW);
288} 291}
289 292
290static void mac100_get_umac_addr(unsigned long ioaddr, unsigned char *addr, 293static void dwmac100_get_umac_addr(unsigned long ioaddr, unsigned char *addr,
291 unsigned int reg_n) 294 unsigned int reg_n)
292{ 295{
293 stmmac_get_mac_addr(ioaddr, addr, MAC_ADDR_HIGH, MAC_ADDR_LOW); 296 stmmac_get_mac_addr(ioaddr, addr, MAC_ADDR_HIGH, MAC_ADDR_LOW);
294} 297}
295 298
296static void mac100_set_filter(struct net_device *dev) 299static void dwmac100_set_filter(struct net_device *dev)
297{ 300{
298 unsigned long ioaddr = dev->base_addr; 301 unsigned long ioaddr = dev->base_addr;
299 u32 value = readl(ioaddr + MAC_CONTROL); 302 u32 value = readl(ioaddr + MAC_CONTROL);
@@ -319,8 +322,8 @@ static void mac100_set_filter(struct net_device *dev)
319 /* Perfect filter mode for physical address and Hash 322 /* Perfect filter mode for physical address and Hash
320 filter for multicast */ 323 filter for multicast */
321 value |= MAC_CONTROL_HP; 324 value |= MAC_CONTROL_HP;
322 value &= ~(MAC_CONTROL_PM | MAC_CONTROL_PR | MAC_CONTROL_IF 325 value &= ~(MAC_CONTROL_PM | MAC_CONTROL_PR |
323 | MAC_CONTROL_HO); 326 MAC_CONTROL_IF | MAC_CONTROL_HO);
324 327
325 memset(mc_filter, 0, sizeof(mc_filter)); 328 memset(mc_filter, 0, sizeof(mc_filter));
326 for (i = 0, mclist = dev->mc_list; 329 for (i = 0, mclist = dev->mc_list;
@@ -347,7 +350,7 @@ static void mac100_set_filter(struct net_device *dev)
347 return; 350 return;
348} 351}
349 352
350static void mac100_flow_ctrl(unsigned long ioaddr, unsigned int duplex, 353static void dwmac100_flow_ctrl(unsigned long ioaddr, unsigned int duplex,
351 unsigned int fc, unsigned int pause_time) 354 unsigned int fc, unsigned int pause_time)
352{ 355{
353 unsigned int flow = MAC_FLOW_CTRL_ENABLE; 356 unsigned int flow = MAC_FLOW_CTRL_ENABLE;
@@ -359,13 +362,15 @@ static void mac100_flow_ctrl(unsigned long ioaddr, unsigned int duplex,
359 return; 362 return;
360} 363}
361 364
362/* No PMT module supported in our SoC for the Ethernet Controller. */ 365/* No PMT module supported for this Ethernet Controller.
363static void mac100_pmt(unsigned long ioaddr, unsigned long mode) 366 * Tested on ST platforms only.
367 */
368static void dwmac100_pmt(unsigned long ioaddr, unsigned long mode)
364{ 369{
365 return; 370 return;
366} 371}
367 372
368static void mac100_init_rx_desc(struct dma_desc *p, unsigned int ring_size, 373static void dwmac100_init_rx_desc(struct dma_desc *p, unsigned int ring_size,
369 int disable_rx_ic) 374 int disable_rx_ic)
370{ 375{
371 int i; 376 int i;
@@ -381,7 +386,7 @@ static void mac100_init_rx_desc(struct dma_desc *p, unsigned int ring_size,
381 return; 386 return;
382} 387}
383 388
384static void mac100_init_tx_desc(struct dma_desc *p, unsigned int ring_size) 389static void dwmac100_init_tx_desc(struct dma_desc *p, unsigned int ring_size)
385{ 390{
386 int i; 391 int i;
387 for (i = 0; i < ring_size; i++) { 392 for (i = 0; i < ring_size; i++) {
@@ -393,32 +398,32 @@ static void mac100_init_tx_desc(struct dma_desc *p, unsigned int ring_size)
393 return; 398 return;
394} 399}
395 400
396static int mac100_get_tx_owner(struct dma_desc *p) 401static int dwmac100_get_tx_owner(struct dma_desc *p)
397{ 402{
398 return p->des01.tx.own; 403 return p->des01.tx.own;
399} 404}
400 405
401static int mac100_get_rx_owner(struct dma_desc *p) 406static int dwmac100_get_rx_owner(struct dma_desc *p)
402{ 407{
403 return p->des01.rx.own; 408 return p->des01.rx.own;
404} 409}
405 410
406static void mac100_set_tx_owner(struct dma_desc *p) 411static void dwmac100_set_tx_owner(struct dma_desc *p)
407{ 412{
408 p->des01.tx.own = 1; 413 p->des01.tx.own = 1;
409} 414}
410 415
411static void mac100_set_rx_owner(struct dma_desc *p) 416static void dwmac100_set_rx_owner(struct dma_desc *p)
412{ 417{
413 p->des01.rx.own = 1; 418 p->des01.rx.own = 1;
414} 419}
415 420
416static int mac100_get_tx_ls(struct dma_desc *p) 421static int dwmac100_get_tx_ls(struct dma_desc *p)
417{ 422{
418 return p->des01.tx.last_segment; 423 return p->des01.tx.last_segment;
419} 424}
420 425
421static void mac100_release_tx_desc(struct dma_desc *p) 426static void dwmac100_release_tx_desc(struct dma_desc *p)
422{ 427{
423 int ter = p->des01.tx.end_ring; 428 int ter = p->des01.tx.end_ring;
424 429
@@ -444,74 +449,91 @@ static void mac100_release_tx_desc(struct dma_desc *p)
444 return; 449 return;
445} 450}
446 451
447static void mac100_prepare_tx_desc(struct dma_desc *p, int is_fs, int len, 452static void dwmac100_prepare_tx_desc(struct dma_desc *p, int is_fs, int len,
448 int csum_flag) 453 int csum_flag)
449{ 454{
450 p->des01.tx.first_segment = is_fs; 455 p->des01.tx.first_segment = is_fs;
451 p->des01.tx.buffer1_size = len; 456 p->des01.tx.buffer1_size = len;
452} 457}
453 458
454static void mac100_clear_tx_ic(struct dma_desc *p) 459static void dwmac100_clear_tx_ic(struct dma_desc *p)
455{ 460{
456 p->des01.tx.interrupt = 0; 461 p->des01.tx.interrupt = 0;
457} 462}
458 463
459static void mac100_close_tx_desc(struct dma_desc *p) 464static void dwmac100_close_tx_desc(struct dma_desc *p)
460{ 465{
461 p->des01.tx.last_segment = 1; 466 p->des01.tx.last_segment = 1;
462 p->des01.tx.interrupt = 1; 467 p->des01.tx.interrupt = 1;
463} 468}
464 469
465static int mac100_get_rx_frame_len(struct dma_desc *p) 470static int dwmac100_get_rx_frame_len(struct dma_desc *p)
466{ 471{
467 return p->des01.rx.frame_length; 472 return p->des01.rx.frame_length;
468} 473}
469 474
470struct stmmac_ops mac100_driver = { 475struct stmmac_ops dwmac100_ops = {
471 .core_init = mac100_core_init, 476 .core_init = dwmac100_core_init,
472 .dump_mac_regs = mac100_dump_mac_regs, 477 .dump_regs = dwmac100_dump_mac_regs,
473 .dma_init = mac100_dma_init, 478 .host_irq_status = dwmac100_irq_status,
474 .dump_dma_regs = mac100_dump_dma_regs, 479 .set_filter = dwmac100_set_filter,
475 .dma_mode = mac100_dma_operation_mode, 480 .flow_ctrl = dwmac100_flow_ctrl,
476 .dma_diagnostic_fr = mac100_dma_diagnostic_fr, 481 .pmt = dwmac100_pmt,
477 .tx_status = mac100_get_tx_frame_status, 482 .set_umac_addr = dwmac100_set_umac_addr,
478 .rx_status = mac100_get_rx_frame_status, 483 .get_umac_addr = dwmac100_get_umac_addr,
479 .get_tx_len = mac100_get_tx_len,
480 .set_filter = mac100_set_filter,
481 .flow_ctrl = mac100_flow_ctrl,
482 .pmt = mac100_pmt,
483 .init_rx_desc = mac100_init_rx_desc,
484 .init_tx_desc = mac100_init_tx_desc,
485 .get_tx_owner = mac100_get_tx_owner,
486 .get_rx_owner = mac100_get_rx_owner,
487 .release_tx_desc = mac100_release_tx_desc,
488 .prepare_tx_desc = mac100_prepare_tx_desc,
489 .clear_tx_ic = mac100_clear_tx_ic,
490 .close_tx_desc = mac100_close_tx_desc,
491 .get_tx_ls = mac100_get_tx_ls,
492 .set_tx_owner = mac100_set_tx_owner,
493 .set_rx_owner = mac100_set_rx_owner,
494 .get_rx_frame_len = mac100_get_rx_frame_len,
495 .host_irq_status = mac100_irq_status,
496 .set_umac_addr = mac100_set_umac_addr,
497 .get_umac_addr = mac100_get_umac_addr,
498}; 484};
499 485
500struct mac_device_info *mac100_setup(unsigned long ioaddr) 486struct stmmac_dma_ops dwmac100_dma_ops = {
487 .init = dwmac100_dma_init,
488 .dump_regs = dwmac100_dump_dma_regs,
489 .dma_mode = dwmac100_dma_operation_mode,
490 .dma_diagnostic_fr = dwmac100_dma_diagnostic_fr,
491 .enable_dma_transmission = dwmac_enable_dma_transmission,
492 .enable_dma_irq = dwmac_enable_dma_irq,
493 .disable_dma_irq = dwmac_disable_dma_irq,
494 .start_tx = dwmac_dma_start_tx,
495 .stop_tx = dwmac_dma_stop_tx,
496 .start_rx = dwmac_dma_start_rx,
497 .stop_rx = dwmac_dma_stop_rx,
498 .dma_interrupt = dwmac_dma_interrupt,
499};
500
501struct stmmac_desc_ops dwmac100_desc_ops = {
502 .tx_status = dwmac100_get_tx_frame_status,
503 .rx_status = dwmac100_get_rx_frame_status,
504 .get_tx_len = dwmac100_get_tx_len,
505 .init_rx_desc = dwmac100_init_rx_desc,
506 .init_tx_desc = dwmac100_init_tx_desc,
507 .get_tx_owner = dwmac100_get_tx_owner,
508 .get_rx_owner = dwmac100_get_rx_owner,
509 .release_tx_desc = dwmac100_release_tx_desc,
510 .prepare_tx_desc = dwmac100_prepare_tx_desc,
511 .clear_tx_ic = dwmac100_clear_tx_ic,
512 .close_tx_desc = dwmac100_close_tx_desc,
513 .get_tx_ls = dwmac100_get_tx_ls,
514 .set_tx_owner = dwmac100_set_tx_owner,
515 .set_rx_owner = dwmac100_set_rx_owner,
516 .get_rx_frame_len = dwmac100_get_rx_frame_len,
517};
518
519struct mac_device_info *dwmac100_setup(unsigned long ioaddr)
501{ 520{
502 struct mac_device_info *mac; 521 struct mac_device_info *mac;
503 522
504 mac = kzalloc(sizeof(const struct mac_device_info), GFP_KERNEL); 523 mac = kzalloc(sizeof(const struct mac_device_info), GFP_KERNEL);
505 524
506 pr_info("\tMAC 10/100\n"); 525 pr_info("\tDWMAC100\n");
526
527 mac->mac = &dwmac100_ops;
528 mac->desc = &dwmac100_desc_ops;
529 mac->dma = &dwmac100_dma_ops;
507 530
508 mac->ops = &mac100_driver; 531 mac->pmt = PMT_NOT_SUPPORTED;
509 mac->hw.pmt = PMT_NOT_SUPPORTED; 532 mac->link.port = MAC_CONTROL_PS;
510 mac->hw.link.port = MAC_CONTROL_PS; 533 mac->link.duplex = MAC_CONTROL_F;
511 mac->hw.link.duplex = MAC_CONTROL_F; 534 mac->link.speed = 0;
512 mac->hw.link.speed = 0; 535 mac->mii.addr = MAC_MII_ADDR;
513 mac->hw.mii.addr = MAC_MII_ADDR; 536 mac->mii.data = MAC_MII_DATA;
514 mac->hw.mii.data = MAC_MII_DATA;
515 537
516 return mac; 538 return mac;
517} 539}
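dwmac100_set_umac_addr()/dwmac100_get_umac_addr() above forward to the shared stmmac_set_mac_addr()/stmmac_get_mac_addr() helpers, now declared extern in common.h (presumably carried in the new dwmac_lib.o object), which split the six station-address bytes across a 16-bit-wide "high" and a 32-bit "low" register. A self-contained round trip of that packing, with plain variables standing in for the MMIO registers:

#include <stdio.h>
#include <stdint.h>
#include <string.h>

int main(void)
{
        const uint8_t addr[6] = { 0x00, 0x16, 0x3e, 0xaa, 0xbb, 0xcc };
        uint8_t out[6];
        uint32_t high, low;

        /* Pack: bytes 4..5 go to the high register, bytes 0..3 to the low. */
        high = (addr[5] << 8) | addr[4];
        low  = ((uint32_t)addr[3] << 24) | (addr[2] << 16) |
               (addr[1] << 8) | addr[0];

        /* Unpack the same way the get helper does. */
        out[0] = low & 0xff;
        out[1] = (low >> 8) & 0xff;
        out[2] = (low >> 16) & 0xff;
        out[3] = (low >> 24) & 0xff;
        out[4] = high & 0xff;
        out[5] = (high >> 8) & 0xff;

        printf("round trip %s\n",
               memcmp(addr, out, 6) == 0 ? "ok" : "mismatch");
        return 0;
}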
diff --git a/drivers/net/stmmac/mac100.h b/drivers/net/stmmac/dwmac100.h
index 0f8f110d004..0f8f110d004 100644
--- a/drivers/net/stmmac/mac100.h
+++ b/drivers/net/stmmac/dwmac100.h
diff --git a/drivers/net/stmmac/gmac.h b/drivers/net/stmmac/dwmac1000.h
index 2e82d6c9a14..62dca0e384e 100644
--- a/drivers/net/stmmac/gmac.h
+++ b/drivers/net/stmmac/dwmac1000.h
@@ -20,6 +20,9 @@
20 Author: Giuseppe Cavallaro <peppe.cavallaro@st.com> 20 Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
21*******************************************************************************/ 21*******************************************************************************/
22 22
23#include <linux/phy.h>
24#include "common.h"
25
23#define GMAC_CONTROL 0x00000000 /* Configuration */ 26#define GMAC_CONTROL 0x00000000 /* Configuration */
24#define GMAC_FRAME_FILTER 0x00000004 /* Frame Filter */ 27#define GMAC_FRAME_FILTER 0x00000004 /* Frame Filter */
25#define GMAC_HASH_HIGH 0x00000008 /* Multicast Hash Table High */ 28#define GMAC_HASH_HIGH 0x00000008 /* Multicast Hash Table High */
@@ -32,7 +35,7 @@
32#define GMAC_WAKEUP_FILTER 0x00000028 /* Wake-up Frame Filter */ 35#define GMAC_WAKEUP_FILTER 0x00000028 /* Wake-up Frame Filter */
33 36
34#define GMAC_INT_STATUS 0x00000038 /* interrupt status register */ 37#define GMAC_INT_STATUS 0x00000038 /* interrupt status register */
35enum gmac_irq_status { 38enum dwmac1000_irq_status {
36 time_stamp_irq = 0x0200, 39 time_stamp_irq = 0x0200,
37 mmc_rx_csum_offload_irq = 0x0080, 40 mmc_rx_csum_offload_irq = 0x0080,
38 mmc_tx_irq = 0x0040, 41 mmc_tx_irq = 0x0040,
@@ -202,3 +205,16 @@ enum rtc_control {
202#define GMAC_MMC_RX_INTR 0x104 205#define GMAC_MMC_RX_INTR 0x104
203#define GMAC_MMC_TX_INTR 0x108 206#define GMAC_MMC_TX_INTR 0x108
204#define GMAC_MMC_RX_CSUM_OFFLOAD 0x208 207#define GMAC_MMC_RX_CSUM_OFFLOAD 0x208
208
209#undef DWMAC1000_DEBUG
210/* #define DWMAC1000__DEBUG */
211#undef FRAME_FILTER_DEBUG
212/* #define FRAME_FILTER_DEBUG */
213#ifdef DWMAC1000__DEBUG
214#define DBG(fmt, args...) printk(fmt, ## args)
215#else
216#define DBG(fmt, args...) do { } while (0)
217#endif
218
219extern struct stmmac_dma_ops dwmac1000_dma_ops;
220extern struct stmmac_desc_ops dwmac1000_desc_ops;
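The tail of dwmac1000.h adds the driver's usual compile-time debug switch: with the debug symbol defined, DBG() becomes printk(); otherwise it becomes an empty do { } while (0), which keeps call sites syntactically intact (a trailing semicolon is still required and if/else bodies stay well-formed). A userspace rendition of the same idiom, with printf standing in for printk:

#include <stdio.h>

/* Flip this to enable the debug output. */
/* #define EXAMPLE_DEBUG */

#ifdef EXAMPLE_DEBUG
#define DBG(fmt, args...) printf(fmt, ## args)
#else
#define DBG(fmt, args...) do { } while (0)
#endif

int main(void)
{
        DBG("rx status = 0x%x\n", 0x40);   /* compiled out unless EXAMPLE_DEBUG */

        if (1)
                DBG("the do/while keeps this branch well-formed\n");
        else
                printf("never reached\n");
        return 0;
}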
diff --git a/drivers/net/stmmac/dwmac1000_core.c b/drivers/net/stmmac/dwmac1000_core.c
new file mode 100644
index 00000000000..d812e9cdb3d
--- /dev/null
+++ b/drivers/net/stmmac/dwmac1000_core.c
@@ -0,0 +1,245 @@
1/*******************************************************************************
2 This is the driver for the GMAC on-chip Ethernet controller for ST SoCs.
3 DWC Ether MAC 10/100/1000 Universal version 3.41a has been used for
4 developing this code.
5
6 This only implements the mac core functions for this chip.
7
8 Copyright (C) 2007-2009 STMicroelectronics Ltd
9
10 This program is free software; you can redistribute it and/or modify it
11 under the terms and conditions of the GNU General Public License,
12 version 2, as published by the Free Software Foundation.
13
14 This program is distributed in the hope it will be useful, but WITHOUT
15 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
16 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
17 more details.
18
19 You should have received a copy of the GNU General Public License along with
20 this program; if not, write to the Free Software Foundation, Inc.,
21 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
22
23 The full GNU General Public License is included in this distribution in
24 the file called "COPYING".
25
26 Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
27*******************************************************************************/
28
29#include <linux/crc32.h>
30#include "dwmac1000.h"
31
32static void dwmac1000_core_init(unsigned long ioaddr)
33{
34 u32 value = readl(ioaddr + GMAC_CONTROL);
35 value |= GMAC_CORE_INIT;
36 writel(value, ioaddr + GMAC_CONTROL);
37
38 /* STBus Bridge Configuration */
39 /*writel(0xc5608, ioaddr + 0x00007000);*/
40
41 /* Freeze MMC counters */
42 writel(0x8, ioaddr + GMAC_MMC_CTRL);
43 /* Mask GMAC interrupts */
44 writel(0x207, ioaddr + GMAC_INT_MASK);
45
46#ifdef STMMAC_VLAN_TAG_USED
47 /* Tag detection without filtering */
48 writel(0x0, ioaddr + GMAC_VLAN_TAG);
49#endif
50 return;
51}
52
53static void dwmac1000_dump_regs(unsigned long ioaddr)
54{
55 int i;
56 pr_info("\tDWMAC1000 regs (base addr = 0x%8x)\n", (unsigned int)ioaddr);
57
58 for (i = 0; i < 55; i++) {
59 int offset = i * 4;
60 pr_info("\tReg No. %d (offset 0x%x): 0x%08x\n", i,
61 offset, readl(ioaddr + offset));
62 }
63 return;
64}
65
66static void dwmac1000_set_umac_addr(unsigned long ioaddr, unsigned char *addr,
67 unsigned int reg_n)
68{
69 stmmac_set_mac_addr(ioaddr, addr, GMAC_ADDR_HIGH(reg_n),
70 GMAC_ADDR_LOW(reg_n));
71}
72
73static void dwmac1000_get_umac_addr(unsigned long ioaddr, unsigned char *addr,
74 unsigned int reg_n)
75{
76 stmmac_get_mac_addr(ioaddr, addr, GMAC_ADDR_HIGH(reg_n),
77 GMAC_ADDR_LOW(reg_n));
78}
79
80static void dwmac1000_set_filter(struct net_device *dev)
81{
82 unsigned long ioaddr = dev->base_addr;
83 unsigned int value = 0;
84
85 DBG(KERN_INFO "%s: # mcasts %d, # unicast %d\n",
86 __func__, dev->mc_count, netdev_uc_count(dev));
87
88 if (dev->flags & IFF_PROMISC)
89 value = GMAC_FRAME_FILTER_PR;
90 else if ((dev->mc_count > HASH_TABLE_SIZE)
91 || (dev->flags & IFF_ALLMULTI)) {
92 value = GMAC_FRAME_FILTER_PM; /* pass all multi */
93 writel(0xffffffff, ioaddr + GMAC_HASH_HIGH);
94 writel(0xffffffff, ioaddr + GMAC_HASH_LOW);
95 } else if (dev->mc_count > 0) {
96 int i;
97 u32 mc_filter[2];
98 struct dev_mc_list *mclist;
99
100 /* Hash filter for multicast */
101 value = GMAC_FRAME_FILTER_HMC;
102
103 memset(mc_filter, 0, sizeof(mc_filter));
104 for (i = 0, mclist = dev->mc_list;
105 mclist && i < dev->mc_count; i++, mclist = mclist->next) {
106 /* The upper 6 bits of the calculated CRC are used to
 107 index the contents of the hash table */
108 int bit_nr =
109 bitrev32(~crc32_le(~0, mclist->dmi_addr, 6)) >> 26;
110 /* The most significant bit determines the register to
111 * use (H/L) while the other 5 bits determine the bit
112 * within the register. */
113 mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
114 }
115 writel(mc_filter[0], ioaddr + GMAC_HASH_LOW);
116 writel(mc_filter[1], ioaddr + GMAC_HASH_HIGH);
117 }
118
119 /* Handle multiple unicast addresses (perfect filtering)*/
120 if (netdev_uc_count(dev) > GMAC_MAX_UNICAST_ADDRESSES)
 121 /* Switch to promiscuous mode if more than 16 addrs
122 are required */
123 value |= GMAC_FRAME_FILTER_PR;
124 else {
125 int reg = 1;
126 struct netdev_hw_addr *ha;
127
128 netdev_for_each_uc_addr(ha, dev) {
129 dwmac1000_set_umac_addr(ioaddr, ha->addr, reg);
130 reg++;
131 }
132 }
133
134#ifdef FRAME_FILTER_DEBUG
135 /* Enable Receive all mode (to debug filtering_fail errors) */
136 value |= GMAC_FRAME_FILTER_RA;
137#endif
138 writel(value, ioaddr + GMAC_FRAME_FILTER);
139
140 DBG(KERN_INFO "\tFrame Filter reg: 0x%08x\n\tHash regs: "
141 "HI 0x%08x, LO 0x%08x\n", readl(ioaddr + GMAC_FRAME_FILTER),
142 readl(ioaddr + GMAC_HASH_HIGH), readl(ioaddr + GMAC_HASH_LOW));
143
144 return;
145}
146
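dwmac1000_set_filter() above hashes each multicast address with crc32_le(), bit-reverses the result and keeps the upper 6 bits: the most significant of those selects the HIGH or LOW hash register and the remaining 5 select the bit inside it. A self-contained illustration of just the indexing arithmetic, using a fixed fake CRC value so the example does not need the kernel's crc32/bitrev helpers:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        /* Pretend this came from bitrev32(~crc32_le(~0, addr, 6)). */
        uint32_t hashed = 0xd2c70000;
        uint32_t mc_filter[2] = { 0, 0 };

        unsigned int bit_nr = hashed >> 26;     /* upper 6 bits: 0..63        */
        unsigned int reg    = bit_nr >> 5;      /* MSB: 0 = LOW, 1 = HIGH reg */
        unsigned int bit    = bit_nr & 31;      /* position inside that reg   */

        mc_filter[reg] |= 1u << bit;

        printf("bit_nr=%u -> GMAC_HASH_%s bit %u (filter words: %08x %08x)\n",
               bit_nr, reg ? "HIGH" : "LOW", bit,
               (unsigned)mc_filter[0], (unsigned)mc_filter[1]);
        return 0;
}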
147static void dwmac1000_flow_ctrl(unsigned long ioaddr, unsigned int duplex,
148 unsigned int fc, unsigned int pause_time)
149{
150 unsigned int flow = 0;
151
152 DBG(KERN_DEBUG "GMAC Flow-Control:\n");
153 if (fc & FLOW_RX) {
154 DBG(KERN_DEBUG "\tReceive Flow-Control ON\n");
155 flow |= GMAC_FLOW_CTRL_RFE;
156 }
157 if (fc & FLOW_TX) {
158 DBG(KERN_DEBUG "\tTransmit Flow-Control ON\n");
159 flow |= GMAC_FLOW_CTRL_TFE;
160 }
161
162 if (duplex) {
163 DBG(KERN_DEBUG "\tduplex mode: pause time: %d\n", pause_time);
164 flow |= (pause_time << GMAC_FLOW_CTRL_PT_SHIFT);
165 }
166
167 writel(flow, ioaddr + GMAC_FLOW_CTRL);
168 return;
169}
170
171static void dwmac1000_pmt(unsigned long ioaddr, unsigned long mode)
172{
173 unsigned int pmt = 0;
174
175 if (mode == WAKE_MAGIC) {
176 DBG(KERN_DEBUG "GMAC: WOL Magic frame\n");
177 pmt |= power_down | magic_pkt_en;
178 } else if (mode == WAKE_UCAST) {
179 DBG(KERN_DEBUG "GMAC: WOL on global unicast\n");
180 pmt |= global_unicast;
181 }
182
183 writel(pmt, ioaddr + GMAC_PMT);
184 return;
185}
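dwmac1000_pmt() above arms wake-on-LAN by writing GMAC_PMT: WAKE_MAGIC selects power-down plus magic-packet detection, WAKE_UCAST selects global-unicast wake-up, and any other mode writes 0 and disarms it. A compact, self-contained restatement of that mapping; the PMT bit positions below are placeholders, the real ones come from the power-event enum in dwmac1000.h, which is not part of this hunk:

#include <stdio.h>

/* Wake flags as in linux/ethtool.h. */
#define WAKE_UCAST 0x0002
#define WAKE_MAGIC 0x0020

/* Placeholder PMT bits; the driver uses power_down, magic_pkt_en and
 * global_unicast from dwmac1000.h instead. */
#define PMT_POWER_DOWN     (1u << 0)
#define PMT_MAGIC_PKT_EN   (1u << 1)
#define PMT_GLOBAL_UNICAST (1u << 9)

static unsigned int pmt_word(unsigned long mode)
{
        unsigned int pmt = 0;

        if (mode == WAKE_MAGIC)
                pmt |= PMT_POWER_DOWN | PMT_MAGIC_PKT_EN;
        else if (mode == WAKE_UCAST)
                pmt |= PMT_GLOBAL_UNICAST;

        return pmt;     /* the driver then writes this value to GMAC_PMT */
}

int main(void)
{
        printf("magic -> 0x%x, ucast -> 0x%x, off -> 0x%x\n",
               pmt_word(WAKE_MAGIC), pmt_word(WAKE_UCAST), pmt_word(0));
        return 0;
}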
186
187
188static void dwmac1000_irq_status(unsigned long ioaddr)
189{
190 u32 intr_status = readl(ioaddr + GMAC_INT_STATUS);
191
192 /* Not used events (e.g. MMC interrupts) are not handled. */
193 if ((intr_status & mmc_tx_irq))
194 DBG(KERN_DEBUG "GMAC: MMC tx interrupt: 0x%08x\n",
195 readl(ioaddr + GMAC_MMC_TX_INTR));
196 if (unlikely(intr_status & mmc_rx_irq))
197 DBG(KERN_DEBUG "GMAC: MMC rx interrupt: 0x%08x\n",
198 readl(ioaddr + GMAC_MMC_RX_INTR));
199 if (unlikely(intr_status & mmc_rx_csum_offload_irq))
200 DBG(KERN_DEBUG "GMAC: MMC rx csum offload: 0x%08x\n",
201 readl(ioaddr + GMAC_MMC_RX_CSUM_OFFLOAD));
202 if (unlikely(intr_status & pmt_irq)) {
203 DBG(KERN_DEBUG "GMAC: received Magic frame\n");
204 /* clear the PMT bits 5 and 6 by reading the PMT
205 * status register. */
206 readl(ioaddr + GMAC_PMT);
207 }
208
209 return;
210}
211
212struct stmmac_ops dwmac1000_ops = {
213 .core_init = dwmac1000_core_init,
214 .dump_regs = dwmac1000_dump_regs,
215 .host_irq_status = dwmac1000_irq_status,
216 .set_filter = dwmac1000_set_filter,
217 .flow_ctrl = dwmac1000_flow_ctrl,
218 .pmt = dwmac1000_pmt,
219 .set_umac_addr = dwmac1000_set_umac_addr,
220 .get_umac_addr = dwmac1000_get_umac_addr,
221};
222
223struct mac_device_info *dwmac1000_setup(unsigned long ioaddr)
224{
225 struct mac_device_info *mac;
226 u32 uid = readl(ioaddr + GMAC_VERSION);
227
228 pr_info("\tDWMAC1000 - user ID: 0x%x, Synopsys ID: 0x%x\n",
229 ((uid & 0x0000ff00) >> 8), (uid & 0x000000ff));
230
231 mac = kzalloc(sizeof(const struct mac_device_info), GFP_KERNEL);
232
233 mac->mac = &dwmac1000_ops;
234 mac->desc = &dwmac1000_desc_ops;
235 mac->dma = &dwmac1000_dma_ops;
236
237 mac->pmt = PMT_SUPPORTED;
238 mac->link.port = GMAC_CONTROL_PS;
239 mac->link.duplex = GMAC_CONTROL_DM;
240 mac->link.speed = GMAC_CONTROL_FES;
241 mac->mii.addr = GMAC_MII_ADDR;
242 mac->mii.data = GMAC_MII_DATA;
243
244 return mac;
245}
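dwmac1000_setup() above reads GMAC_VERSION once and splits it for the probe banner: bits 15:8 carry the user-configured ID and bits 7:0 the Synopsys core ID. The field extraction in isolation, with a made-up sample value:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        uint32_t uid = 0x00001037;      /* made-up GMAC_VERSION value */

        unsigned int user_id     = (uid & 0x0000ff00) >> 8;
        unsigned int synopsys_id = uid & 0x000000ff;

        printf("DWMAC1000 - user ID: 0x%x, Synopsys ID: 0x%x\n",
               user_id, synopsys_id);
        return 0;
}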
diff --git a/drivers/net/stmmac/gmac.c b/drivers/net/stmmac/dwmac1000_dma.c
index 52586ee6895..68245508e2d 100644
--- a/drivers/net/stmmac/gmac.c
+++ b/drivers/net/stmmac/dwmac1000_dma.c
@@ -3,6 +3,8 @@
3 DWC Ether MAC 10/100/1000 Universal version 3.41a has been used for 3 DWC Ether MAC 10/100/1000 Universal version 3.41a has been used for
4 developing this code. 4 developing this code.
5 5
6 This contains the functions to handle the dma and descriptors.
7
6 Copyright (C) 2007-2009 STMicroelectronics Ltd 8 Copyright (C) 2007-2009 STMicroelectronics Ltd
7 9
8 This program is free software; you can redistribute it and/or modify it 10 This program is free software; you can redistribute it and/or modify it
@@ -24,41 +26,11 @@
24 Author: Giuseppe Cavallaro <peppe.cavallaro@st.com> 26 Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
25*******************************************************************************/ 27*******************************************************************************/
26 28
27#include <linux/netdevice.h> 29#include "dwmac1000.h"
28#include <linux/crc32.h> 30#include "dwmac_dma.h"
29#include <linux/mii.h>
30#include <linux/phy.h>
31
32#include "stmmac.h"
33#include "gmac.h"
34
35#undef GMAC_DEBUG
36/*#define GMAC_DEBUG*/
37#undef FRAME_FILTER_DEBUG
38/*#define FRAME_FILTER_DEBUG*/
39#ifdef GMAC_DEBUG
40#define DBG(fmt, args...) printk(fmt, ## args)
41#else
42#define DBG(fmt, args...) do { } while (0)
43#endif
44 31
45static void gmac_dump_regs(unsigned long ioaddr) 32static int dwmac1000_dma_init(unsigned long ioaddr, int pbl, u32 dma_tx,
46{ 33 u32 dma_rx)
47 int i;
48 pr_info("\t----------------------------------------------\n"
49 "\t GMAC registers (base addr = 0x%8x)\n"
50 "\t----------------------------------------------\n",
51 (unsigned int)ioaddr);
52
53 for (i = 0; i < 55; i++) {
54 int offset = i * 4;
55 pr_info("\tReg No. %d (offset 0x%x): 0x%08x\n", i,
56 offset, readl(ioaddr + offset));
57 }
58 return;
59}
60
61static int gmac_dma_init(unsigned long ioaddr, int pbl, u32 dma_tx, u32 dma_rx)
62{ 34{
63 u32 value = readl(ioaddr + DMA_BUS_MODE); 35 u32 value = readl(ioaddr + DMA_BUS_MODE);
64 /* DMA SW reset */ 36 /* DMA SW reset */
@@ -87,7 +59,7 @@ static int gmac_dma_init(unsigned long ioaddr, int pbl, u32 dma_tx, u32 dma_rx)
87} 59}
88 60
89/* Transmit FIFO flush operation */ 61/* Transmit FIFO flush operation */
90static void gmac_flush_tx_fifo(unsigned long ioaddr) 62static void dwmac1000_flush_tx_fifo(unsigned long ioaddr)
91{ 63{
92 u32 csr6 = readl(ioaddr + DMA_CONTROL); 64 u32 csr6 = readl(ioaddr + DMA_CONTROL);
93 writel((csr6 | DMA_CONTROL_FTF), ioaddr + DMA_CONTROL); 65 writel((csr6 | DMA_CONTROL_FTF), ioaddr + DMA_CONTROL);
@@ -95,7 +67,7 @@ static void gmac_flush_tx_fifo(unsigned long ioaddr)
95 do {} while ((readl(ioaddr + DMA_CONTROL) & DMA_CONTROL_FTF)); 67 do {} while ((readl(ioaddr + DMA_CONTROL) & DMA_CONTROL_FTF));
96} 68}
97 69
98static void gmac_dma_operation_mode(unsigned long ioaddr, int txmode, 70static void dwmac1000_dma_operation_mode(unsigned long ioaddr, int txmode,
99 int rxmode) 71 int rxmode)
100{ 72{
101 u32 csr6 = readl(ioaddr + DMA_CONTROL); 73 u32 csr6 = readl(ioaddr + DMA_CONTROL);
@@ -148,13 +120,13 @@ static void gmac_dma_operation_mode(unsigned long ioaddr, int txmode,
148} 120}
149 121
150/* Not yet implemented --- no RMON module */ 122/* Not yet implemented --- no RMON module */
151static void gmac_dma_diagnostic_fr(void *data, struct stmmac_extra_stats *x, 123static void dwmac1000_dma_diagnostic_fr(void *data,
152 unsigned long ioaddr) 124 struct stmmac_extra_stats *x, unsigned long ioaddr)
153{ 125{
154 return; 126 return;
155} 127}
156 128
157static void gmac_dump_dma_regs(unsigned long ioaddr) 129static void dwmac1000_dump_dma_regs(unsigned long ioaddr)
158{ 130{
159 int i; 131 int i;
160 pr_info(" DMA registers\n"); 132 pr_info(" DMA registers\n");
@@ -169,8 +141,9 @@ static void gmac_dump_dma_regs(unsigned long ioaddr)
169 return; 141 return;
170} 142}
171 143
172static int gmac_get_tx_frame_status(void *data, struct stmmac_extra_stats *x, 144static int dwmac1000_get_tx_frame_status(void *data,
173 struct dma_desc *p, unsigned long ioaddr) 145 struct stmmac_extra_stats *x,
146 struct dma_desc *p, unsigned long ioaddr)
174{ 147{
175 int ret = 0; 148 int ret = 0;
176 struct net_device_stats *stats = (struct net_device_stats *)data; 149 struct net_device_stats *stats = (struct net_device_stats *)data;
@@ -185,7 +158,7 @@ static int gmac_get_tx_frame_status(void *data, struct stmmac_extra_stats *x,
185 if (unlikely(p->des01.etx.frame_flushed)) { 158 if (unlikely(p->des01.etx.frame_flushed)) {
186 DBG(KERN_ERR "\tframe_flushed error\n"); 159 DBG(KERN_ERR "\tframe_flushed error\n");
187 x->tx_frame_flushed++; 160 x->tx_frame_flushed++;
188 gmac_flush_tx_fifo(ioaddr); 161 dwmac1000_flush_tx_fifo(ioaddr);
189 } 162 }
190 163
191 if (unlikely(p->des01.etx.loss_carrier)) { 164 if (unlikely(p->des01.etx.loss_carrier)) {
@@ -213,7 +186,7 @@ static int gmac_get_tx_frame_status(void *data, struct stmmac_extra_stats *x,
213 186
214 if (unlikely(p->des01.etx.underflow_error)) { 187 if (unlikely(p->des01.etx.underflow_error)) {
215 DBG(KERN_ERR "\tunderflow error\n"); 188 DBG(KERN_ERR "\tunderflow error\n");
216 gmac_flush_tx_fifo(ioaddr); 189 dwmac1000_flush_tx_fifo(ioaddr);
217 x->tx_underflow++; 190 x->tx_underflow++;
218 } 191 }
219 192
@@ -225,7 +198,7 @@ static int gmac_get_tx_frame_status(void *data, struct stmmac_extra_stats *x,
225 if (unlikely(p->des01.etx.payload_error)) { 198 if (unlikely(p->des01.etx.payload_error)) {
226 DBG(KERN_ERR "\tAddr/Payload csum error\n"); 199 DBG(KERN_ERR "\tAddr/Payload csum error\n");
227 x->tx_payload_error++; 200 x->tx_payload_error++;
228 gmac_flush_tx_fifo(ioaddr); 201 dwmac1000_flush_tx_fifo(ioaddr);
229 } 202 }
230 203
231 ret = -1; 204 ret = -1;
@@ -245,12 +218,12 @@ static int gmac_get_tx_frame_status(void *data, struct stmmac_extra_stats *x,
245 return ret; 218 return ret;
246} 219}
247 220
248static int gmac_get_tx_len(struct dma_desc *p) 221static int dwmac1000_get_tx_len(struct dma_desc *p)
249{ 222{
250 return p->des01.etx.buffer1_size; 223 return p->des01.etx.buffer1_size;
251} 224}
252 225
253static int gmac_coe_rdes0(int ipc_err, int type, int payload_err) 226static int dwmac1000_coe_rdes0(int ipc_err, int type, int payload_err)
254{ 227{
255 int ret = good_frame; 228 int ret = good_frame;
256 u32 status = (type << 2 | ipc_err << 1 | payload_err) & 0x7; 229 u32 status = (type << 2 | ipc_err << 1 | payload_err) & 0x7;
@@ -293,8 +266,8 @@ static int gmac_coe_rdes0(int ipc_err, int type, int payload_err)
293 return ret; 266 return ret;
294} 267}
295 268
296static int gmac_get_rx_frame_status(void *data, struct stmmac_extra_stats *x, 269static int dwmac1000_get_rx_frame_status(void *data,
297 struct dma_desc *p) 270 struct stmmac_extra_stats *x, struct dma_desc *p)
298{ 271{
299 int ret = good_frame; 272 int ret = good_frame;
300 struct net_device_stats *stats = (struct net_device_stats *)data; 273 struct net_device_stats *stats = (struct net_device_stats *)data;
@@ -339,7 +312,7 @@ static int gmac_get_rx_frame_status(void *data, struct stmmac_extra_stats *x,
339 * It doesn't match with the information reported into the databook. 312 * It doesn't match with the information reported into the databook.
340 * At any rate, we need to understand if the CSUM hw computation is ok 313 * At any rate, we need to understand if the CSUM hw computation is ok
341 * and report this info to the upper layers. */ 314 * and report this info to the upper layers. */
342 ret = gmac_coe_rdes0(p->des01.erx.ipc_csum_error, 315 ret = dwmac1000_coe_rdes0(p->des01.erx.ipc_csum_error,
343 p->des01.erx.frame_type, p->des01.erx.payload_csum_error); 316 p->des01.erx.frame_type, p->des01.erx.payload_csum_error);
344 317
345 if (unlikely(p->des01.erx.dribbling)) { 318 if (unlikely(p->des01.erx.dribbling)) {
@@ -370,181 +343,7 @@ static int gmac_get_rx_frame_status(void *data, struct stmmac_extra_stats *x,
370 return ret; 343 return ret;
371} 344}
372 345
373static void gmac_irq_status(unsigned long ioaddr) 346static void dwmac1000_init_rx_desc(struct dma_desc *p, unsigned int ring_size,
374{
375 u32 intr_status = readl(ioaddr + GMAC_INT_STATUS);
376
377 /* Unused events (e.g. MMC interrupts) are not handled. */
378 if ((intr_status & mmc_tx_irq))
379 DBG(KERN_DEBUG "GMAC: MMC tx interrupt: 0x%08x\n",
380 readl(ioaddr + GMAC_MMC_TX_INTR));
381 if (unlikely(intr_status & mmc_rx_irq))
382 DBG(KERN_DEBUG "GMAC: MMC rx interrupt: 0x%08x\n",
383 readl(ioaddr + GMAC_MMC_RX_INTR));
384 if (unlikely(intr_status & mmc_rx_csum_offload_irq))
385 DBG(KERN_DEBUG "GMAC: MMC rx csum offload: 0x%08x\n",
386 readl(ioaddr + GMAC_MMC_RX_CSUM_OFFLOAD));
387 if (unlikely(intr_status & pmt_irq)) {
388 DBG(KERN_DEBUG "GMAC: received Magic frame\n");
389 /* clear the PMT bits 5 and 6 by reading the PMT
390 * status register. */
391 readl(ioaddr + GMAC_PMT);
392 }
393
394 return;
395}
396
397static void gmac_core_init(unsigned long ioaddr)
398{
399 u32 value = readl(ioaddr + GMAC_CONTROL);
400 value |= GMAC_CORE_INIT;
401 writel(value, ioaddr + GMAC_CONTROL);
402
403 /* STBus Bridge Configuration */
404 /*writel(0xc5608, ioaddr + 0x00007000);*/
405
406 /* Freeze MMC counters */
407 writel(0x8, ioaddr + GMAC_MMC_CTRL);
408 /* Mask GMAC interrupts */
409 writel(0x207, ioaddr + GMAC_INT_MASK);
410
411#ifdef STMMAC_VLAN_TAG_USED
412 /* Tag detection without filtering */
413 writel(0x0, ioaddr + GMAC_VLAN_TAG);
414#endif
415 return;
416}
417
418static void gmac_set_umac_addr(unsigned long ioaddr, unsigned char *addr,
419 unsigned int reg_n)
420{
421 stmmac_set_mac_addr(ioaddr, addr, GMAC_ADDR_HIGH(reg_n),
422 GMAC_ADDR_LOW(reg_n));
423}
424
425static void gmac_get_umac_addr(unsigned long ioaddr, unsigned char *addr,
426 unsigned int reg_n)
427{
428 stmmac_get_mac_addr(ioaddr, addr, GMAC_ADDR_HIGH(reg_n),
429 GMAC_ADDR_LOW(reg_n));
430}
431
432static void gmac_set_filter(struct net_device *dev)
433{
434 unsigned long ioaddr = dev->base_addr;
435 unsigned int value = 0;
436
437 DBG(KERN_INFO "%s: # mcasts %d, # unicast %d\n",
438 __func__, dev->mc_count, dev->uc_count);
439
440 if (dev->flags & IFF_PROMISC)
441 value = GMAC_FRAME_FILTER_PR;
442 else if ((dev->mc_count > HASH_TABLE_SIZE)
443 || (dev->flags & IFF_ALLMULTI)) {
444 value = GMAC_FRAME_FILTER_PM; /* pass all multi */
445 writel(0xffffffff, ioaddr + GMAC_HASH_HIGH);
446 writel(0xffffffff, ioaddr + GMAC_HASH_LOW);
447 } else if (dev->mc_count > 0) {
448 int i;
449 u32 mc_filter[2];
450 struct dev_mc_list *mclist;
451
452 /* Hash filter for multicast */
453 value = GMAC_FRAME_FILTER_HMC;
454
455 memset(mc_filter, 0, sizeof(mc_filter));
456 for (i = 0, mclist = dev->mc_list;
457 mclist && i < dev->mc_count; i++, mclist = mclist->next) {
458 /* The upper 6 bits of the calculated CRC are used to
459 index the contents of the hash table */
460 int bit_nr =
461 bitrev32(~crc32_le(~0, mclist->dmi_addr, 6)) >> 26;
462 /* The most significant bit determines the register to
463 * use (H/L) while the other 5 bits determine the bit
464 * within the register. */
465 mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
466 }
467 writel(mc_filter[0], ioaddr + GMAC_HASH_LOW);
468 writel(mc_filter[1], ioaddr + GMAC_HASH_HIGH);
469 }
470
471 /* Handle multiple unicast addresses (perfect filtering)*/
472 if (dev->uc_count > GMAC_MAX_UNICAST_ADDRESSES)
473 /* Switch to promiscuous mode if more than 16 addrs
474 are required */
475 value |= GMAC_FRAME_FILTER_PR;
476 else {
477 int i;
478 struct dev_addr_list *uc_ptr = dev->uc_list;
479
480 for (i = 0; i < dev->uc_count; i++) {
481 gmac_set_umac_addr(ioaddr, uc_ptr->da_addr,
482 i + 1);
483
484 DBG(KERN_INFO "\t%d "
485 "- Unicast addr %02x:%02x:%02x:%02x:%02x:"
486 "%02x\n", i + 1,
487 uc_ptr->da_addr[0], uc_ptr->da_addr[1],
488 uc_ptr->da_addr[2], uc_ptr->da_addr[3],
489 uc_ptr->da_addr[4], uc_ptr->da_addr[5]);
490 uc_ptr = uc_ptr->next;
491 }
492 }
493
494#ifdef FRAME_FILTER_DEBUG
495 /* Enable Receive all mode (to debug filtering_fail errors) */
496 value |= GMAC_FRAME_FILTER_RA;
497#endif
498 writel(value, ioaddr + GMAC_FRAME_FILTER);
499
500 DBG(KERN_INFO "\tFrame Filter reg: 0x%08x\n\tHash regs: "
501 "HI 0x%08x, LO 0x%08x\n", readl(ioaddr + GMAC_FRAME_FILTER),
502 readl(ioaddr + GMAC_HASH_HIGH), readl(ioaddr + GMAC_HASH_LOW));
503
504 return;
505}
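For reference, the multicast hash computation in gmac_set_filter() above (kept as-is by the renamed dwmac1000 code) reduces to the sketch below. It uses only the kernel's crc32_le() and bitrev32() helpers; the wrapper name is made up for illustration and is not driver code.

    #include <linux/bitrev.h>
    #include <linux/crc32.h>
    #include <linux/types.h>

    /* Illustration only: set the hash-table bit selected by one multicast
     * address, exactly as the loop in gmac_set_filter() does. */
    static inline void gmac_hash_bit_set(const u8 *addr, u32 mc_filter[2])
    {
            /* Upper 6 bits of the bit-reversed CRC-32 index the 64 hash bits. */
            int bit_nr = bitrev32(~crc32_le(~0, addr, 6)) >> 26;

            /* Bit 5 picks the HIGH/LOW register, bits 4:0 the bit within it. */
            mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
    }

mc_filter[0] then goes to GMAC_HASH_LOW and mc_filter[1] to GMAC_HASH_HIGH, as in the writel() calls above.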
506
507static void gmac_flow_ctrl(unsigned long ioaddr, unsigned int duplex,
508 unsigned int fc, unsigned int pause_time)
509{
510 unsigned int flow = 0;
511
512 DBG(KERN_DEBUG "GMAC Flow-Control:\n");
513 if (fc & FLOW_RX) {
514 DBG(KERN_DEBUG "\tReceive Flow-Control ON\n");
515 flow |= GMAC_FLOW_CTRL_RFE;
516 }
517 if (fc & FLOW_TX) {
518 DBG(KERN_DEBUG "\tTransmit Flow-Control ON\n");
519 flow |= GMAC_FLOW_CTRL_TFE;
520 }
521
522 if (duplex) {
523 DBG(KERN_DEBUG "\tduplex mode: pause time: %d\n", pause_time);
524 flow |= (pause_time << GMAC_FLOW_CTRL_PT_SHIFT);
525 }
526
527 writel(flow, ioaddr + GMAC_FLOW_CTRL);
528 return;
529}
530
531static void gmac_pmt(unsigned long ioaddr, unsigned long mode)
532{
533 unsigned int pmt = 0;
534
535 if (mode == WAKE_MAGIC) {
536 DBG(KERN_DEBUG "GMAC: WOL Magic frame\n");
537 pmt |= power_down | magic_pkt_en;
538 } else if (mode == WAKE_UCAST) {
539 DBG(KERN_DEBUG "GMAC: WOL on global unicast\n");
540 pmt |= global_unicast;
541 }
542
543 writel(pmt, ioaddr + GMAC_PMT);
544 return;
545}
546
547static void gmac_init_rx_desc(struct dma_desc *p, unsigned int ring_size,
548 int disable_rx_ic) 347 int disable_rx_ic)
549{ 348{
550 int i; 349 int i;
@@ -562,7 +361,7 @@ static void gmac_init_rx_desc(struct dma_desc *p, unsigned int ring_size,
562 return; 361 return;
563} 362}
564 363
565static void gmac_init_tx_desc(struct dma_desc *p, unsigned int ring_size) 364static void dwmac1000_init_tx_desc(struct dma_desc *p, unsigned int ring_size)
566{ 365{
567 int i; 366 int i;
568 367
@@ -576,32 +375,32 @@ static void gmac_init_tx_desc(struct dma_desc *p, unsigned int ring_size)
576 return; 375 return;
577} 376}
578 377
579static int gmac_get_tx_owner(struct dma_desc *p) 378static int dwmac1000_get_tx_owner(struct dma_desc *p)
580{ 379{
581 return p->des01.etx.own; 380 return p->des01.etx.own;
582} 381}
583 382
584static int gmac_get_rx_owner(struct dma_desc *p) 383static int dwmac1000_get_rx_owner(struct dma_desc *p)
585{ 384{
586 return p->des01.erx.own; 385 return p->des01.erx.own;
587} 386}
588 387
589static void gmac_set_tx_owner(struct dma_desc *p) 388static void dwmac1000_set_tx_owner(struct dma_desc *p)
590{ 389{
591 p->des01.etx.own = 1; 390 p->des01.etx.own = 1;
592} 391}
593 392
594static void gmac_set_rx_owner(struct dma_desc *p) 393static void dwmac1000_set_rx_owner(struct dma_desc *p)
595{ 394{
596 p->des01.erx.own = 1; 395 p->des01.erx.own = 1;
597} 396}
598 397
599static int gmac_get_tx_ls(struct dma_desc *p) 398static int dwmac1000_get_tx_ls(struct dma_desc *p)
600{ 399{
601 return p->des01.etx.last_segment; 400 return p->des01.etx.last_segment;
602} 401}
603 402
604static void gmac_release_tx_desc(struct dma_desc *p) 403static void dwmac1000_release_tx_desc(struct dma_desc *p)
605{ 404{
606 int ter = p->des01.etx.end_ring; 405 int ter = p->des01.etx.end_ring;
607 406
@@ -611,7 +410,7 @@ static void gmac_release_tx_desc(struct dma_desc *p)
611 return; 410 return;
612} 411}
613 412
614static void gmac_prepare_tx_desc(struct dma_desc *p, int is_fs, int len, 413static void dwmac1000_prepare_tx_desc(struct dma_desc *p, int is_fs, int len,
615 int csum_flag) 414 int csum_flag)
616{ 415{
617 p->des01.etx.first_segment = is_fs; 416 p->des01.etx.first_segment = is_fs;
@@ -625,69 +424,51 @@ static void gmac_prepare_tx_desc(struct dma_desc *p, int is_fs, int len,
625 p->des01.etx.checksum_insertion = cic_full; 424 p->des01.etx.checksum_insertion = cic_full;
626} 425}
627 426
628static void gmac_clear_tx_ic(struct dma_desc *p) 427static void dwmac1000_clear_tx_ic(struct dma_desc *p)
629{ 428{
630 p->des01.etx.interrupt = 0; 429 p->des01.etx.interrupt = 0;
631} 430}
632 431
633static void gmac_close_tx_desc(struct dma_desc *p) 432static void dwmac1000_close_tx_desc(struct dma_desc *p)
634{ 433{
635 p->des01.etx.last_segment = 1; 434 p->des01.etx.last_segment = 1;
636 p->des01.etx.interrupt = 1; 435 p->des01.etx.interrupt = 1;
637} 436}
638 437
639static int gmac_get_rx_frame_len(struct dma_desc *p) 438static int dwmac1000_get_rx_frame_len(struct dma_desc *p)
640{ 439{
641 return p->des01.erx.frame_length; 440 return p->des01.erx.frame_length;
642} 441}
643 442
644struct stmmac_ops gmac_driver = { 443struct stmmac_dma_ops dwmac1000_dma_ops = {
645 .core_init = gmac_core_init, 444 .init = dwmac1000_dma_init,
646 .dump_mac_regs = gmac_dump_regs, 445 .dump_regs = dwmac1000_dump_dma_regs,
647 .dma_init = gmac_dma_init, 446 .dma_mode = dwmac1000_dma_operation_mode,
648 .dump_dma_regs = gmac_dump_dma_regs, 447 .dma_diagnostic_fr = dwmac1000_dma_diagnostic_fr,
649 .dma_mode = gmac_dma_operation_mode, 448 .enable_dma_transmission = dwmac_enable_dma_transmission,
650 .dma_diagnostic_fr = gmac_dma_diagnostic_fr, 449 .enable_dma_irq = dwmac_enable_dma_irq,
651 .tx_status = gmac_get_tx_frame_status, 450 .disable_dma_irq = dwmac_disable_dma_irq,
652 .rx_status = gmac_get_rx_frame_status, 451 .start_tx = dwmac_dma_start_tx,
653 .get_tx_len = gmac_get_tx_len, 452 .stop_tx = dwmac_dma_stop_tx,
654 .set_filter = gmac_set_filter, 453 .start_rx = dwmac_dma_start_rx,
655 .flow_ctrl = gmac_flow_ctrl, 454 .stop_rx = dwmac_dma_stop_rx,
656 .pmt = gmac_pmt, 455 .dma_interrupt = dwmac_dma_interrupt,
657 .init_rx_desc = gmac_init_rx_desc,
658 .init_tx_desc = gmac_init_tx_desc,
659 .get_tx_owner = gmac_get_tx_owner,
660 .get_rx_owner = gmac_get_rx_owner,
661 .release_tx_desc = gmac_release_tx_desc,
662 .prepare_tx_desc = gmac_prepare_tx_desc,
663 .clear_tx_ic = gmac_clear_tx_ic,
664 .close_tx_desc = gmac_close_tx_desc,
665 .get_tx_ls = gmac_get_tx_ls,
666 .set_tx_owner = gmac_set_tx_owner,
667 .set_rx_owner = gmac_set_rx_owner,
668 .get_rx_frame_len = gmac_get_rx_frame_len,
669 .host_irq_status = gmac_irq_status,
670 .set_umac_addr = gmac_set_umac_addr,
671 .get_umac_addr = gmac_get_umac_addr,
672}; 456};
673 457
674struct mac_device_info *gmac_setup(unsigned long ioaddr) 458struct stmmac_desc_ops dwmac1000_desc_ops = {
675{ 459 .tx_status = dwmac1000_get_tx_frame_status,
676 struct mac_device_info *mac; 460 .rx_status = dwmac1000_get_rx_frame_status,
677 u32 uid = readl(ioaddr + GMAC_VERSION); 461 .get_tx_len = dwmac1000_get_tx_len,
678 462 .init_rx_desc = dwmac1000_init_rx_desc,
679 pr_info("\tGMAC - user ID: 0x%x, Synopsys ID: 0x%x\n", 463 .init_tx_desc = dwmac1000_init_tx_desc,
680 ((uid & 0x0000ff00) >> 8), (uid & 0x000000ff)); 464 .get_tx_owner = dwmac1000_get_tx_owner,
681 465 .get_rx_owner = dwmac1000_get_rx_owner,
682 mac = kzalloc(sizeof(const struct mac_device_info), GFP_KERNEL); 466 .release_tx_desc = dwmac1000_release_tx_desc,
683 467 .prepare_tx_desc = dwmac1000_prepare_tx_desc,
684 mac->ops = &gmac_driver; 468 .clear_tx_ic = dwmac1000_clear_tx_ic,
685 mac->hw.pmt = PMT_SUPPORTED; 469 .close_tx_desc = dwmac1000_close_tx_desc,
686 mac->hw.link.port = GMAC_CONTROL_PS; 470 .get_tx_ls = dwmac1000_get_tx_ls,
687 mac->hw.link.duplex = GMAC_CONTROL_DM; 471 .set_tx_owner = dwmac1000_set_tx_owner,
688 mac->hw.link.speed = GMAC_CONTROL_FES; 472 .set_rx_owner = dwmac1000_set_rx_owner,
689 mac->hw.mii.addr = GMAC_MII_ADDR; 473 .get_rx_frame_len = dwmac1000_get_rx_frame_len,
690 mac->hw.mii.data = GMAC_MII_DATA; 474};
691
692 return mac;
693}
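The monolithic stmmac_ops table is split here: DMA and descriptor callbacks move into their own tables, which the core driver reaches through mac_device_info (priv->hw->mac, priv->hw->dma, priv->hw->desc in the stmmac_main.c hunks below). A self-contained toy in plain C showing only the shape of that dispatch; every name in it is invented and only the pattern mirrors the patch:

    #include <stdio.h>

    struct dma_ops  { void (*start_tx)(unsigned long ioaddr); };
    struct desc_ops { int  (*get_tx_owner)(int own_bit); };

    /* Stand-in for mac_device_info: one descriptor, per-block ops tables. */
    struct device_info {
            const struct dma_ops  *dma;
            const struct desc_ops *desc;
    };

    static void toy_start_tx(unsigned long ioaddr) { printf("start tx @0x%lx\n", ioaddr); }
    static int  toy_get_tx_owner(int own) { return own; }

    static const struct dma_ops  toy_dma  = { .start_tx = toy_start_tx };
    static const struct desc_ops toy_desc = { .get_tx_owner = toy_get_tx_owner };

    int main(void)
    {
            struct device_info hw = { .dma = &toy_dma, .desc = &toy_desc };

            hw.dma->start_tx(0x1000);                 /* cf. priv->hw->dma->start_tx() */
            return hw.desc->get_tx_owner(1) ? 0 : 1;  /* cf. priv->hw->desc->get_tx_owner() */
    }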
diff --git a/drivers/net/stmmac/dwmac_dma.h b/drivers/net/stmmac/dwmac_dma.h
new file mode 100644
index 00000000000..de848d9f606
--- /dev/null
+++ b/drivers/net/stmmac/dwmac_dma.h
@@ -0,0 +1,107 @@
1/*******************************************************************************
2 DWMAC DMA Header file.
3
4 Copyright (C) 2007-2009 STMicroelectronics Ltd
5
6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License,
8 version 2, as published by the Free Software Foundation.
9
10 This program is distributed in the hope it will be useful, but WITHOUT
11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 more details.
14
15 You should have received a copy of the GNU General Public License along with
16 this program; if not, write to the Free Software Foundation, Inc.,
17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18
19 The full GNU General Public License is included in this distribution in
20 the file called "COPYING".
21
22 Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
23*******************************************************************************/
24
25/* DMA CRS Control and Status Register Mapping */
26#define DMA_BUS_MODE 0x00001000 /* Bus Mode */
27#define DMA_XMT_POLL_DEMAND 0x00001004 /* Transmit Poll Demand */
28#define DMA_RCV_POLL_DEMAND 0x00001008 /* Received Poll Demand */
29#define DMA_RCV_BASE_ADDR 0x0000100c /* Receive List Base */
30#define DMA_TX_BASE_ADDR 0x00001010 /* Transmit List Base */
31#define DMA_STATUS 0x00001014 /* Status Register */
32#define DMA_CONTROL 0x00001018 /* Ctrl (Operational Mode) */
33#define DMA_INTR_ENA 0x0000101c /* Interrupt Enable */
34#define DMA_MISSED_FRAME_CTR 0x00001020 /* Missed Frame Counter */
35#define DMA_CUR_TX_BUF_ADDR 0x00001050 /* Current Host Tx Buffer */
36#define DMA_CUR_RX_BUF_ADDR 0x00001054 /* Current Host Rx Buffer */
37
38/* DMA Control register defines */
39#define DMA_CONTROL_ST 0x00002000 /* Start/Stop Transmission */
40#define DMA_CONTROL_SR 0x00000002 /* Start/Stop Receive */
41
42/* DMA Normal interrupt */
43#define DMA_INTR_ENA_NIE 0x00010000 /* Normal Summary */
44#define DMA_INTR_ENA_TIE 0x00000001 /* Transmit Interrupt */
45#define DMA_INTR_ENA_TUE 0x00000004 /* Transmit Buffer Unavailable */
46#define DMA_INTR_ENA_RIE 0x00000040 /* Receive Interrupt */
47#define DMA_INTR_ENA_ERE 0x00004000 /* Early Receive */
48
49#define DMA_INTR_NORMAL (DMA_INTR_ENA_NIE | DMA_INTR_ENA_RIE | \
50 DMA_INTR_ENA_TIE)
51
52/* DMA Abnormal interrupt */
53#define DMA_INTR_ENA_AIE 0x00008000 /* Abnormal Summary */
54#define DMA_INTR_ENA_FBE 0x00002000 /* Fatal Bus Error */
55#define DMA_INTR_ENA_ETE 0x00000400 /* Early Transmit */
56#define DMA_INTR_ENA_RWE 0x00000200 /* Receive Watchdog */
57#define DMA_INTR_ENA_RSE 0x00000100 /* Receive Stopped */
58#define DMA_INTR_ENA_RUE 0x00000080 /* Receive Buffer Unavailable */
59#define DMA_INTR_ENA_UNE 0x00000020 /* Tx Underflow */
60#define DMA_INTR_ENA_OVE 0x00000010 /* Receive Overflow */
61#define DMA_INTR_ENA_TJE 0x00000008 /* Transmit Jabber */
62#define DMA_INTR_ENA_TSE 0x00000002 /* Transmit Stopped */
63
64#define DMA_INTR_ABNORMAL (DMA_INTR_ENA_AIE | DMA_INTR_ENA_FBE | \
65 DMA_INTR_ENA_UNE)
66
67/* DMA default interrupt mask */
68#define DMA_INTR_DEFAULT_MASK (DMA_INTR_NORMAL | DMA_INTR_ABNORMAL)
69
70/* DMA Status register defines */
71#define DMA_STATUS_GPI 0x10000000 /* PMT interrupt */
72#define DMA_STATUS_GMI 0x08000000 /* MMC interrupt */
73#define DMA_STATUS_GLI 0x04000000 /* GMAC Line interface int */
76#define DMA_STATUS_EB_MASK 0x00380000 /* Error Bits Mask */
77#define DMA_STATUS_EB_TX_ABORT 0x00080000 /* Error Bits - TX Abort */
78#define DMA_STATUS_EB_RX_ABORT 0x00100000 /* Error Bits - RX Abort */
79#define DMA_STATUS_TS_MASK 0x00700000 /* Transmit Process State */
80#define DMA_STATUS_TS_SHIFT 20
81#define DMA_STATUS_RS_MASK 0x000e0000 /* Receive Process State */
82#define DMA_STATUS_RS_SHIFT 17
83#define DMA_STATUS_NIS 0x00010000 /* Normal Interrupt Summary */
84#define DMA_STATUS_AIS 0x00008000 /* Abnormal Interrupt Summary */
85#define DMA_STATUS_ERI 0x00004000 /* Early Receive Interrupt */
86#define DMA_STATUS_FBI 0x00002000 /* Fatal Bus Error Interrupt */
87#define DMA_STATUS_ETI 0x00000400 /* Early Transmit Interrupt */
88#define DMA_STATUS_RWT 0x00000200 /* Receive Watchdog Timeout */
89#define DMA_STATUS_RPS 0x00000100 /* Receive Process Stopped */
90#define DMA_STATUS_RU 0x00000080 /* Receive Buffer Unavailable */
91#define DMA_STATUS_RI 0x00000040 /* Receive Interrupt */
92#define DMA_STATUS_UNF 0x00000020 /* Transmit Underflow */
93#define DMA_STATUS_OVF 0x00000010 /* Receive Overflow */
94#define DMA_STATUS_TJT 0x00000008 /* Transmit Jabber Timeout */
95#define DMA_STATUS_TU 0x00000004 /* Transmit Buffer Unavailable */
96#define DMA_STATUS_TPS 0x00000002 /* Transmit Process Stopped */
97#define DMA_STATUS_TI 0x00000001 /* Transmit Interrupt */
98
99extern void dwmac_enable_dma_transmission(unsigned long ioaddr);
100extern void dwmac_enable_dma_irq(unsigned long ioaddr);
101extern void dwmac_disable_dma_irq(unsigned long ioaddr);
102extern void dwmac_dma_start_tx(unsigned long ioaddr);
103extern void dwmac_dma_stop_tx(unsigned long ioaddr);
104extern void dwmac_dma_start_rx(unsigned long ioaddr);
105extern void dwmac_dma_stop_rx(unsigned long ioaddr);
106extern int dwmac_dma_interrupt(unsigned long ioaddr,
107 struct stmmac_extra_stats *x);
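As a numeric cross-check of the mask that dwmac_enable_dma_irq() below programs into DMA_INTR_ENA, the defines above combine as follows; this is a standalone self-check with the values copied from this header, not part of the patch:

    #include <assert.h>

    #define NIE 0x00010000  /* DMA_INTR_ENA_NIE */
    #define TIE 0x00000001  /* DMA_INTR_ENA_TIE */
    #define RIE 0x00000040  /* DMA_INTR_ENA_RIE */
    #define AIE 0x00008000  /* DMA_INTR_ENA_AIE */
    #define FBE 0x00002000  /* DMA_INTR_ENA_FBE */
    #define UNE 0x00000020  /* DMA_INTR_ENA_UNE */

    int main(void)
    {
            /* Normal summary + Rx complete + Tx complete. */
            assert((NIE | RIE | TIE) == 0x00010041);
            /* Abnormal summary + fatal bus error + Tx underflow. */
            assert((AIE | FBE | UNE) == 0x0000a020);
            /* DMA_INTR_DEFAULT_MASK as written to DMA_INTR_ENA. */
            assert((0x00010041 | 0x0000a020) == 0x0001a061);
            return 0;
    }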
diff --git a/drivers/net/stmmac/dwmac_lib.c b/drivers/net/stmmac/dwmac_lib.c
new file mode 100644
index 00000000000..d4adb1eaa44
--- /dev/null
+++ b/drivers/net/stmmac/dwmac_lib.c
@@ -0,0 +1,263 @@
1/*******************************************************************************
2 Copyright (C) 2007-2009 STMicroelectronics Ltd
3
4 This program is free software; you can redistribute it and/or modify it
5 under the terms and conditions of the GNU General Public License,
6 version 2, as published by the Free Software Foundation.
7
8 This program is distributed in the hope it will be useful, but WITHOUT
9 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 more details.
12
13 You should have received a copy of the GNU General Public License along with
14 this program; if not, write to the Free Software Foundation, Inc.,
15 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
16
17 The full GNU General Public License is included in this distribution in
18 the file called "COPYING".
19
20 Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
21*******************************************************************************/
22
23#include <linux/io.h>
24#include "common.h"
25#include "dwmac_dma.h"
26
27#undef DWMAC_DMA_DEBUG
28#ifdef DWMAC_DMA_DEBUG
29#define DBG(fmt, args...) printk(fmt, ## args)
30#else
31#define DBG(fmt, args...) do { } while (0)
32#endif
33
34/* CSR1 enables the transmit DMA to check for new descriptor */
35void dwmac_enable_dma_transmission(unsigned long ioaddr)
36{
37 writel(1, ioaddr + DMA_XMT_POLL_DEMAND);
38}
39
40void dwmac_enable_dma_irq(unsigned long ioaddr)
41{
42 writel(DMA_INTR_DEFAULT_MASK, ioaddr + DMA_INTR_ENA);
43}
44
45void dwmac_disable_dma_irq(unsigned long ioaddr)
46{
47 writel(0, ioaddr + DMA_INTR_ENA);
48}
49
50void dwmac_dma_start_tx(unsigned long ioaddr)
51{
52 u32 value = readl(ioaddr + DMA_CONTROL);
53 value |= DMA_CONTROL_ST;
54 writel(value, ioaddr + DMA_CONTROL);
55 return;
56}
57
58void dwmac_dma_stop_tx(unsigned long ioaddr)
59{
60 u32 value = readl(ioaddr + DMA_CONTROL);
61 value &= ~DMA_CONTROL_ST;
62 writel(value, ioaddr + DMA_CONTROL);
63 return;
64}
65
66void dwmac_dma_start_rx(unsigned long ioaddr)
67{
68 u32 value = readl(ioaddr + DMA_CONTROL);
69 value |= DMA_CONTROL_SR;
70 writel(value, ioaddr + DMA_CONTROL);
71
72 return;
73}
74
75void dwmac_dma_stop_rx(unsigned long ioaddr)
76{
77 u32 value = readl(ioaddr + DMA_CONTROL);
78 value &= ~DMA_CONTROL_SR;
79 writel(value, ioaddr + DMA_CONTROL);
80
81 return;
82}
83
84#ifdef DWMAC_DMA_DEBUG
85static void show_tx_process_state(unsigned int status)
86{
87 unsigned int state;
88 state = (status & DMA_STATUS_TS_MASK) >> DMA_STATUS_TS_SHIFT;
89
90 switch (state) {
91 case 0:
92 pr_info("- TX (Stopped): Reset or Stop command\n");
93 break;
94 case 1:
95 pr_info("- TX (Running):Fetching the Tx desc\n");
96 break;
97 case 2:
98 pr_info("- TX (Running): Waiting for end of tx\n");
99 break;
100 case 3:
101 pr_info("- TX (Running): Reading the data "
102 "and queuing the data into the Tx buf\n");
103 break;
104 case 6:
105 pr_info("- TX (Suspended): Tx Buff Underflow "
106 "or an unavailable Transmit descriptor\n");
107 break;
108 case 7:
109 pr_info("- TX (Running): Closing Tx descriptor\n");
110 break;
111 default:
112 break;
113 }
114 return;
115}
116
117static void show_rx_process_state(unsigned int status)
118{
119 unsigned int state;
120 state = (status & DMA_STATUS_RS_MASK) >> DMA_STATUS_RS_SHIFT;
121
122 switch (state) {
123 case 0:
124 pr_info("- RX (Stopped): Reset or Stop command\n");
125 break;
126 case 1:
127 pr_info("- RX (Running): Fetching the Rx desc\n");
128 break;
129 case 2:
130 pr_info("- RX (Running):Checking for end of pkt\n");
131 break;
132 case 3:
133 pr_info("- RX (Running): Waiting for Rx pkt\n");
134 break;
135 case 4:
136 pr_info("- RX (Suspended): Unavailable Rx buf\n");
137 break;
138 case 5:
139 pr_info("- RX (Running): Closing Rx descriptor\n");
140 break;
141 case 6:
142 pr_info("- RX(Running): Flushing the current frame"
143 " from the Rx buf\n");
144 break;
145 case 7:
146 pr_info("- RX (Running): Queuing the Rx frame"
147 " from the Rx buf into memory\n");
148 break;
149 default:
150 break;
151 }
152 return;
153}
154#endif
155
156int dwmac_dma_interrupt(unsigned long ioaddr,
157 struct stmmac_extra_stats *x)
158{
159 int ret = 0;
160 /* read the status register (CSR5) */
161 u32 intr_status = readl(ioaddr + DMA_STATUS);
162
163 DBG(INFO, "%s: [CSR5: 0x%08x]\n", __func__, intr_status);
164#ifdef DWMAC_DMA_DEBUG
165 /* It displays the DMA process states (CSR5 register) */
166 show_tx_process_state(intr_status);
167 show_rx_process_state(intr_status);
168#endif
169 /* ABNORMAL interrupts */
170 if (unlikely(intr_status & DMA_STATUS_AIS)) {
171 DBG(INFO, "CSR5[15] DMA ABNORMAL IRQ: ");
172 if (unlikely(intr_status & DMA_STATUS_UNF)) {
173 DBG(INFO, "transmit underflow\n");
174 ret = tx_hard_error_bump_tc;
175 x->tx_undeflow_irq++;
176 }
177 if (unlikely(intr_status & DMA_STATUS_TJT)) {
178 DBG(INFO, "transmit jabber\n");
179 x->tx_jabber_irq++;
180 }
181 if (unlikely(intr_status & DMA_STATUS_OVF)) {
182 DBG(INFO, "recv overflow\n");
183 x->rx_overflow_irq++;
184 }
185 if (unlikely(intr_status & DMA_STATUS_RU)) {
186 DBG(INFO, "receive buffer unavailable\n");
187 x->rx_buf_unav_irq++;
188 }
189 if (unlikely(intr_status & DMA_STATUS_RPS)) {
190 DBG(INFO, "receive process stopped\n");
191 x->rx_process_stopped_irq++;
192 }
193 if (unlikely(intr_status & DMA_STATUS_RWT)) {
194 DBG(INFO, "receive watchdog\n");
195 x->rx_watchdog_irq++;
196 }
197 if (unlikely(intr_status & DMA_STATUS_ETI)) {
198 DBG(INFO, "transmit early interrupt\n");
199 x->tx_early_irq++;
200 }
201 if (unlikely(intr_status & DMA_STATUS_TPS)) {
202 DBG(INFO, "transmit process stopped\n");
203 x->tx_process_stopped_irq++;
204 ret = tx_hard_error;
205 }
206 if (unlikely(intr_status & DMA_STATUS_FBI)) {
207 DBG(INFO, "fatal bus error\n");
208 x->fatal_bus_error_irq++;
209 ret = tx_hard_error;
210 }
211 }
212 /* TX/RX NORMAL interrupts */
213 if (intr_status & DMA_STATUS_NIS) {
214 x->normal_irq_n++;
215 if (likely((intr_status & DMA_STATUS_RI) ||
216 (intr_status & (DMA_STATUS_TI))))
217 ret = handle_tx_rx;
218 }
219 /* Optional hardware blocks, interrupts should be disabled */
220 if (unlikely(intr_status &
221 (DMA_STATUS_GPI | DMA_STATUS_GMI | DMA_STATUS_GLI)))
222 pr_info("%s: unexpected status %08x\n", __func__, intr_status);
223 /* Clear the interrupt by writing a logic 1 to the CSR5[15-0] */
224 writel((intr_status & 0x1ffff), ioaddr + DMA_STATUS);
225
226 DBG(INFO, "\n\n");
227 return ret;
228}
229
230
231void stmmac_set_mac_addr(unsigned long ioaddr, u8 addr[6],
232 unsigned int high, unsigned int low)
233{
234 unsigned long data;
235
236 data = (addr[5] << 8) | addr[4];
237 writel(data, ioaddr + high);
238 data = (addr[3] << 24) | (addr[2] << 16) | (addr[1] << 8) | addr[0];
239 writel(data, ioaddr + low);
240
241 return;
242}
243
244void stmmac_get_mac_addr(unsigned long ioaddr, unsigned char *addr,
245 unsigned int high, unsigned int low)
246{
247 unsigned int hi_addr, lo_addr;
248
249 /* Read the MAC address from the hardware */
250 hi_addr = readl(ioaddr + high);
251 lo_addr = readl(ioaddr + low);
252
253 /* Extract the MAC address from the high and low words */
254 addr[0] = lo_addr & 0xff;
255 addr[1] = (lo_addr >> 8) & 0xff;
256 addr[2] = (lo_addr >> 16) & 0xff;
257 addr[3] = (lo_addr >> 24) & 0xff;
258 addr[4] = hi_addr & 0xff;
259 addr[5] = (hi_addr >> 8) & 0xff;
260
261 return;
262}
263
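To make the register layout used by stmmac_set_mac_addr()/stmmac_get_mac_addr() concrete: bytes 0-3 of the address land in the LOW register and bytes 4-5 in the low half of the HIGH register. A minimal standalone check with a made-up address, mirroring the shifts above:

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
            /* Hypothetical address, for illustration only. */
            const uint8_t addr[6] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };

            uint32_t high = (addr[5] << 8) | addr[4];
            uint32_t low  = ((uint32_t)addr[3] << 24) | (addr[2] << 16) |
                            (addr[1] << 8) | addr[0];

            assert(high == 0x00005544);  /* written to the ADDR_HIGH register */
            assert(low  == 0x33221100);  /* written to the ADDR_LOW register */
            return 0;
    }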
diff --git a/drivers/net/stmmac/stmmac.h b/drivers/net/stmmac/stmmac.h
index 6d2eae3040e..ba35e6943cf 100644
--- a/drivers/net/stmmac/stmmac.h
+++ b/drivers/net/stmmac/stmmac.h
@@ -20,7 +20,8 @@
20 Author: Giuseppe Cavallaro <peppe.cavallaro@st.com> 20 Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
21*******************************************************************************/ 21*******************************************************************************/
22 22
23#define DRV_MODULE_VERSION "Oct_09" 23#define DRV_MODULE_VERSION "Jan_2010"
24#include <linux/stmmac.h>
24 25
25#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE) 26#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
26#define STMMAC_VLAN_TAG_USED 27#define STMMAC_VLAN_TAG_USED
@@ -57,7 +58,7 @@ struct stmmac_priv {
57 int rx_csum; 58 int rx_csum;
58 unsigned int dma_buf_sz; 59 unsigned int dma_buf_sz;
59 struct device *device; 60 struct device *device;
60 struct mac_device_info *mac_type; 61 struct mac_device_info *hw;
61 62
62 struct stmmac_extra_stats xstats; 63 struct stmmac_extra_stats xstats;
63 struct napi_struct napi; 64 struct napi_struct napi;
@@ -69,6 +70,7 @@ struct stmmac_priv {
69 int phy_mask; 70 int phy_mask;
70 int (*phy_reset) (void *priv); 71 int (*phy_reset) (void *priv);
71 void (*fix_mac_speed) (void *priv, unsigned int speed); 72 void (*fix_mac_speed) (void *priv, unsigned int speed);
73 void (*bus_setup)(unsigned long ioaddr);
72 void *bsp_priv; 74 void *bsp_priv;
73 75
74 int phy_irq; 76 int phy_irq;
@@ -93,6 +95,28 @@ struct stmmac_priv {
93#endif 95#endif
94}; 96};
95 97
98#ifdef CONFIG_STM_DRIVERS
99#include <linux/stm/pad.h>
100static inline int stmmac_claim_resource(struct platform_device *pdev)
101{
102 int ret = 0;
103 struct plat_stmmacenet_data *plat_dat = pdev->dev.platform_data;
104
105 /* Pad routing setup */
106 if (IS_ERR(devm_stm_pad_claim(&pdev->dev, plat_dat->pad_config,
107 dev_name(&pdev->dev)))) {
108 printk(KERN_ERR "%s: Failed to request pads!\n", __func__);
109 ret = -ENODEV;
110 }
111 return ret;
112}
113#else
114static inline int stmmac_claim_resource(struct platform_device *pdev)
115{
116 return 0;
117}
118#endif
119
96extern int stmmac_mdio_unregister(struct net_device *ndev); 120extern int stmmac_mdio_unregister(struct net_device *ndev);
97extern int stmmac_mdio_register(struct net_device *ndev); 121extern int stmmac_mdio_register(struct net_device *ndev);
98extern void stmmac_set_ethtool_ops(struct net_device *netdev); 122extern void stmmac_set_ethtool_ops(struct net_device *netdev);
diff --git a/drivers/net/stmmac/stmmac_ethtool.c b/drivers/net/stmmac/stmmac_ethtool.c
index 694ebe6a075..0abeff6193a 100644
--- a/drivers/net/stmmac/stmmac_ethtool.c
+++ b/drivers/net/stmmac/stmmac_ethtool.c
@@ -28,6 +28,7 @@
28#include <linux/phy.h> 28#include <linux/phy.h>
29 29
30#include "stmmac.h" 30#include "stmmac.h"
31#include "dwmac_dma.h"
31 32
32#define REG_SPACE_SIZE 0x1054 33#define REG_SPACE_SIZE 0x1054
33#define MAC100_ETHTOOL_NAME "st_mac100" 34#define MAC100_ETHTOOL_NAME "st_mac100"
@@ -268,8 +269,8 @@ stmmac_set_pauseparam(struct net_device *netdev,
268 } 269 }
269 } else { 270 } else {
270 unsigned long ioaddr = netdev->base_addr; 271 unsigned long ioaddr = netdev->base_addr;
271 priv->mac_type->ops->flow_ctrl(ioaddr, phy->duplex, 272 priv->hw->mac->flow_ctrl(ioaddr, phy->duplex,
272 priv->flow_ctrl, priv->pause); 273 priv->flow_ctrl, priv->pause);
273 } 274 }
274 spin_unlock(&priv->lock); 275 spin_unlock(&priv->lock);
275 return ret; 276 return ret;
@@ -283,8 +284,8 @@ static void stmmac_get_ethtool_stats(struct net_device *dev,
283 int i; 284 int i;
284 285
285 /* Update HW stats if supported */ 286 /* Update HW stats if supported */
286 priv->mac_type->ops->dma_diagnostic_fr(&dev->stats, &priv->xstats, 287 priv->hw->dma->dma_diagnostic_fr(&dev->stats, (void *) &priv->xstats,
287 ioaddr); 288 ioaddr);
288 289
289 for (i = 0; i < STMMAC_STATS_LEN; i++) { 290 for (i = 0; i < STMMAC_STATS_LEN; i++) {
290 char *p = (char *)priv + stmmac_gstrings_stats[i].stat_offset; 291 char *p = (char *)priv + stmmac_gstrings_stats[i].stat_offset;
diff --git a/drivers/net/stmmac/stmmac_main.c b/drivers/net/stmmac/stmmac_main.c
index 508fba8fa07..a6733612d64 100644
--- a/drivers/net/stmmac/stmmac_main.c
+++ b/drivers/net/stmmac/stmmac_main.c
@@ -32,7 +32,6 @@
32#include <linux/init.h> 32#include <linux/init.h>
33#include <linux/kernel.h> 33#include <linux/kernel.h>
34#include <linux/interrupt.h> 34#include <linux/interrupt.h>
35#include <linux/netdevice.h>
36#include <linux/etherdevice.h> 35#include <linux/etherdevice.h>
37#include <linux/platform_device.h> 36#include <linux/platform_device.h>
38#include <linux/ip.h> 37#include <linux/ip.h>
@@ -45,7 +44,6 @@
45#include <linux/phy.h> 44#include <linux/phy.h>
46#include <linux/if_vlan.h> 45#include <linux/if_vlan.h>
47#include <linux/dma-mapping.h> 46#include <linux/dma-mapping.h>
48#include <linux/stm/soc.h>
49#include "stmmac.h" 47#include "stmmac.h"
50 48
51#define STMMAC_RESOURCE_NAME "stmmaceth" 49#define STMMAC_RESOURCE_NAME "stmmaceth"
@@ -226,41 +224,38 @@ static void stmmac_adjust_link(struct net_device *dev)
226 if (phydev->duplex != priv->oldduplex) { 224 if (phydev->duplex != priv->oldduplex) {
227 new_state = 1; 225 new_state = 1;
228 if (!(phydev->duplex)) 226 if (!(phydev->duplex))
229 ctrl &= ~priv->mac_type->hw.link.duplex; 227 ctrl &= ~priv->hw->link.duplex;
230 else 228 else
231 ctrl |= priv->mac_type->hw.link.duplex; 229 ctrl |= priv->hw->link.duplex;
232 priv->oldduplex = phydev->duplex; 230 priv->oldduplex = phydev->duplex;
233 } 231 }
234 /* Flow Control operation */ 232 /* Flow Control operation */
235 if (phydev->pause) 233 if (phydev->pause)
236 priv->mac_type->ops->flow_ctrl(ioaddr, phydev->duplex, 234 priv->hw->mac->flow_ctrl(ioaddr, phydev->duplex,
237 fc, pause_time); 235 fc, pause_time);
238 236
239 if (phydev->speed != priv->speed) { 237 if (phydev->speed != priv->speed) {
240 new_state = 1; 238 new_state = 1;
241 switch (phydev->speed) { 239 switch (phydev->speed) {
242 case 1000: 240 case 1000:
243 if (likely(priv->is_gmac)) 241 if (likely(priv->is_gmac))
244 ctrl &= ~priv->mac_type->hw.link.port; 242 ctrl &= ~priv->hw->link.port;
245 break; 243 break;
246 case 100: 244 case 100:
247 case 10: 245 case 10:
248 if (priv->is_gmac) { 246 if (priv->is_gmac) {
249 ctrl |= priv->mac_type->hw.link.port; 247 ctrl |= priv->hw->link.port;
250 if (phydev->speed == SPEED_100) { 248 if (phydev->speed == SPEED_100) {
251 ctrl |= 249 ctrl |= priv->hw->link.speed;
252 priv->mac_type->hw.link.
253 speed;
254 } else { 250 } else {
255 ctrl &= 251 ctrl &= ~(priv->hw->link.speed);
256 ~(priv->mac_type->hw.
257 link.speed);
258 } 252 }
259 } else { 253 } else {
260 ctrl &= ~priv->mac_type->hw.link.port; 254 ctrl &= ~priv->hw->link.port;
261 } 255 }
262 priv->fix_mac_speed(priv->bsp_priv, 256 if (likely(priv->fix_mac_speed))
263 phydev->speed); 257 priv->fix_mac_speed(priv->bsp_priv,
258 phydev->speed);
264 break; 259 break;
265 default: 260 default:
266 if (netif_msg_link(priv)) 261 if (netif_msg_link(priv))
@@ -305,8 +300,8 @@ static int stmmac_init_phy(struct net_device *dev)
305{ 300{
306 struct stmmac_priv *priv = netdev_priv(dev); 301 struct stmmac_priv *priv = netdev_priv(dev);
307 struct phy_device *phydev; 302 struct phy_device *phydev;
308 char phy_id[BUS_ID_SIZE]; /* PHY to connect */ 303 char phy_id[MII_BUS_ID_SIZE + 3];
309 char bus_id[BUS_ID_SIZE]; 304 char bus_id[MII_BUS_ID_SIZE];
310 305
311 priv->oldlink = 0; 306 priv->oldlink = 0;
312 priv->speed = 0; 307 priv->speed = 0;
@@ -318,7 +313,8 @@ static int stmmac_init_phy(struct net_device *dev)
318 } 313 }
319 314
320 snprintf(bus_id, MII_BUS_ID_SIZE, "%x", priv->bus_id); 315 snprintf(bus_id, MII_BUS_ID_SIZE, "%x", priv->bus_id);
321 snprintf(phy_id, BUS_ID_SIZE, PHY_ID_FMT, bus_id, priv->phy_addr); 316 snprintf(phy_id, MII_BUS_ID_SIZE + 3, PHY_ID_FMT, bus_id,
317 priv->phy_addr);
322 pr_debug("stmmac_init_phy: trying to attach to %s\n", phy_id); 318 pr_debug("stmmac_init_phy: trying to attach to %s\n", phy_id);
323 319
324 phydev = phy_connect(dev, phy_id, &stmmac_adjust_link, 0, 320 phydev = phy_connect(dev, phy_id, &stmmac_adjust_link, 0,
@@ -508,8 +504,8 @@ static void init_dma_desc_rings(struct net_device *dev)
508 priv->cur_tx = 0; 504 priv->cur_tx = 0;
509 505
510 /* Clear the Rx/Tx descriptors */ 506 /* Clear the Rx/Tx descriptors */
511 priv->mac_type->ops->init_rx_desc(priv->dma_rx, rxsize, dis_ic); 507 priv->hw->desc->init_rx_desc(priv->dma_rx, rxsize, dis_ic);
512 priv->mac_type->ops->init_tx_desc(priv->dma_tx, txsize); 508 priv->hw->desc->init_tx_desc(priv->dma_tx, txsize);
513 509
514 if (netif_msg_hw(priv)) { 510 if (netif_msg_hw(priv)) {
515 pr_info("RX descriptor ring:\n"); 511 pr_info("RX descriptor ring:\n");
@@ -544,8 +540,8 @@ static void dma_free_tx_skbufs(struct stmmac_priv *priv)
544 struct dma_desc *p = priv->dma_tx + i; 540 struct dma_desc *p = priv->dma_tx + i;
545 if (p->des2) 541 if (p->des2)
546 dma_unmap_single(priv->device, p->des2, 542 dma_unmap_single(priv->device, p->des2,
547 priv->mac_type->ops->get_tx_len(p), 543 priv->hw->desc->get_tx_len(p),
548 DMA_TO_DEVICE); 544 DMA_TO_DEVICE);
549 dev_kfree_skb_any(priv->tx_skbuff[i]); 545 dev_kfree_skb_any(priv->tx_skbuff[i]);
550 priv->tx_skbuff[i] = NULL; 546 priv->tx_skbuff[i] = NULL;
551 } 547 }
@@ -575,50 +571,6 @@ static void free_dma_desc_resources(struct stmmac_priv *priv)
575} 571}
576 572
577/** 573/**
578 * stmmac_dma_start_tx
579 * @ioaddr: device I/O address
580 * Description: this function starts the DMA tx process.
581 */
582static void stmmac_dma_start_tx(unsigned long ioaddr)
583{
584 u32 value = readl(ioaddr + DMA_CONTROL);
585 value |= DMA_CONTROL_ST;
586 writel(value, ioaddr + DMA_CONTROL);
587 return;
588}
589
590static void stmmac_dma_stop_tx(unsigned long ioaddr)
591{
592 u32 value = readl(ioaddr + DMA_CONTROL);
593 value &= ~DMA_CONTROL_ST;
594 writel(value, ioaddr + DMA_CONTROL);
595 return;
596}
597
598/**
599 * stmmac_dma_start_rx
600 * @ioaddr: device I/O address
601 * Description: this function starts the DMA rx process.
602 */
603static void stmmac_dma_start_rx(unsigned long ioaddr)
604{
605 u32 value = readl(ioaddr + DMA_CONTROL);
606 value |= DMA_CONTROL_SR;
607 writel(value, ioaddr + DMA_CONTROL);
608
609 return;
610}
611
612static void stmmac_dma_stop_rx(unsigned long ioaddr)
613{
614 u32 value = readl(ioaddr + DMA_CONTROL);
615 value &= ~DMA_CONTROL_SR;
616 writel(value, ioaddr + DMA_CONTROL);
617
618 return;
619}
620
621/**
622 * stmmac_dma_operation_mode - HW DMA operation mode 574 * stmmac_dma_operation_mode - HW DMA operation mode
623 * @priv : pointer to the private device structure. 575 * @priv : pointer to the private device structure.
624 * Description: it sets the DMA operation mode: tx/rx DMA thresholds 576 * Description: it sets the DMA operation mode: tx/rx DMA thresholds
@@ -629,18 +581,18 @@ static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
629{ 581{
630 if (!priv->is_gmac) { 582 if (!priv->is_gmac) {
631 /* MAC 10/100 */ 583 /* MAC 10/100 */
632 priv->mac_type->ops->dma_mode(priv->dev->base_addr, tc, 0); 584 priv->hw->dma->dma_mode(priv->dev->base_addr, tc, 0);
633 priv->tx_coe = NO_HW_CSUM; 585 priv->tx_coe = NO_HW_CSUM;
634 } else { 586 } else {
635 if ((priv->dev->mtu <= ETH_DATA_LEN) && (tx_coe)) { 587 if ((priv->dev->mtu <= ETH_DATA_LEN) && (tx_coe)) {
636 priv->mac_type->ops->dma_mode(priv->dev->base_addr, 588 priv->hw->dma->dma_mode(priv->dev->base_addr,
637 SF_DMA_MODE, SF_DMA_MODE); 589 SF_DMA_MODE, SF_DMA_MODE);
638 tc = SF_DMA_MODE; 590 tc = SF_DMA_MODE;
639 priv->tx_coe = HW_CSUM; 591 priv->tx_coe = HW_CSUM;
640 } else { 592 } else {
641 /* Checksum computation is performed in software. */ 593 /* Checksum computation is performed in software. */
642 priv->mac_type->ops->dma_mode(priv->dev->base_addr, tc, 594 priv->hw->dma->dma_mode(priv->dev->base_addr, tc,
643 SF_DMA_MODE); 595 SF_DMA_MODE);
644 priv->tx_coe = NO_HW_CSUM; 596 priv->tx_coe = NO_HW_CSUM;
645 } 597 }
646 } 598 }
@@ -649,88 +601,6 @@ static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
649 return; 601 return;
650} 602}
651 603
652#ifdef STMMAC_DEBUG
653/**
654 * show_tx_process_state
655 * @status: tx descriptor status field
656 * Description: it shows the Transmit Process State for CSR5[22:20]
657 */
658static void show_tx_process_state(unsigned int status)
659{
660 unsigned int state;
661 state = (status & DMA_STATUS_TS_MASK) >> DMA_STATUS_TS_SHIFT;
662
663 switch (state) {
664 case 0:
665 pr_info("- TX (Stopped): Reset or Stop command\n");
666 break;
667 case 1:
668 pr_info("- TX (Running):Fetching the Tx desc\n");
669 break;
670 case 2:
671 pr_info("- TX (Running): Waiting for end of tx\n");
672 break;
673 case 3:
674 pr_info("- TX (Running): Reading the data "
675 "and queuing the data into the Tx buf\n");
676 break;
677 case 6:
678 pr_info("- TX (Suspended): Tx Buff Underflow "
679 "or an unavailable Transmit descriptor\n");
680 break;
681 case 7:
682 pr_info("- TX (Running): Closing Tx descriptor\n");
683 break;
684 default:
685 break;
686 }
687 return;
688}
689
690/**
691 * show_rx_process_state
692 * @status: rx descriptor status field
693 * Description: it shows the Receive Process State for CSR5[19:17]
694 */
695static void show_rx_process_state(unsigned int status)
696{
697 unsigned int state;
698 state = (status & DMA_STATUS_RS_MASK) >> DMA_STATUS_RS_SHIFT;
699
700 switch (state) {
701 case 0:
702 pr_info("- RX (Stopped): Reset or Stop command\n");
703 break;
704 case 1:
705 pr_info("- RX (Running): Fetching the Rx desc\n");
706 break;
707 case 2:
708 pr_info("- RX (Running):Checking for end of pkt\n");
709 break;
710 case 3:
711 pr_info("- RX (Running): Waiting for Rx pkt\n");
712 break;
713 case 4:
714 pr_info("- RX (Suspended): Unavailable Rx buf\n");
715 break;
716 case 5:
717 pr_info("- RX (Running): Closing Rx descriptor\n");
718 break;
719 case 6:
720 pr_info("- RX(Running): Flushing the current frame"
721 " from the Rx buf\n");
722 break;
723 case 7:
724 pr_info("- RX (Running): Queuing the Rx frame"
725 " from the Rx buf into memory\n");
726 break;
727 default:
728 break;
729 }
730 return;
731}
732#endif
733
734/** 604/**
735 * stmmac_tx: 605 * stmmac_tx:
736 * @priv: private driver structure 606 * @priv: private driver structure
@@ -748,16 +618,16 @@ static void stmmac_tx(struct stmmac_priv *priv)
748 struct dma_desc *p = priv->dma_tx + entry; 618 struct dma_desc *p = priv->dma_tx + entry;
749 619
750 /* Check if the descriptor is owned by the DMA. */ 620 /* Check if the descriptor is owned by the DMA. */
751 if (priv->mac_type->ops->get_tx_owner(p)) 621 if (priv->hw->desc->get_tx_owner(p))
752 break; 622 break;
753 623
754 /* Verify tx error by looking at the last segment */ 624 /* Verify tx error by looking at the last segment */
755 last = priv->mac_type->ops->get_tx_ls(p); 625 last = priv->hw->desc->get_tx_ls(p);
756 if (likely(last)) { 626 if (likely(last)) {
757 int tx_error = 627 int tx_error =
758 priv->mac_type->ops->tx_status(&priv->dev->stats, 628 priv->hw->desc->tx_status(&priv->dev->stats,
759 &priv->xstats, 629 &priv->xstats, p,
760 p, ioaddr); 630 ioaddr);
761 if (likely(tx_error == 0)) { 631 if (likely(tx_error == 0)) {
762 priv->dev->stats.tx_packets++; 632 priv->dev->stats.tx_packets++;
763 priv->xstats.tx_pkt_n++; 633 priv->xstats.tx_pkt_n++;
@@ -769,7 +639,7 @@ static void stmmac_tx(struct stmmac_priv *priv)
769 639
770 if (likely(p->des2)) 640 if (likely(p->des2))
771 dma_unmap_single(priv->device, p->des2, 641 dma_unmap_single(priv->device, p->des2,
772 priv->mac_type->ops->get_tx_len(p), 642 priv->hw->desc->get_tx_len(p),
773 DMA_TO_DEVICE); 643 DMA_TO_DEVICE);
774 if (unlikely(p->des3)) 644 if (unlikely(p->des3))
775 p->des3 = 0; 645 p->des3 = 0;
@@ -790,7 +660,7 @@ static void stmmac_tx(struct stmmac_priv *priv)
790 priv->tx_skbuff[entry] = NULL; 660 priv->tx_skbuff[entry] = NULL;
791 } 661 }
792 662
793 priv->mac_type->ops->release_tx_desc(p); 663 priv->hw->desc->release_tx_desc(p);
794 664
795 entry = (++priv->dirty_tx) % txsize; 665 entry = (++priv->dirty_tx) % txsize;
796 } 666 }
@@ -814,7 +684,7 @@ static inline void stmmac_enable_irq(struct stmmac_priv *priv)
814 priv->tm->timer_start(tmrate); 684 priv->tm->timer_start(tmrate);
815 else 685 else
816#endif 686#endif
817 writel(DMA_INTR_DEFAULT_MASK, priv->dev->base_addr + DMA_INTR_ENA); 687 priv->hw->dma->enable_dma_irq(priv->dev->base_addr);
818} 688}
819 689
820static inline void stmmac_disable_irq(struct stmmac_priv *priv) 690static inline void stmmac_disable_irq(struct stmmac_priv *priv)
@@ -824,7 +694,7 @@ static inline void stmmac_disable_irq(struct stmmac_priv *priv)
824 priv->tm->timer_stop(); 694 priv->tm->timer_stop();
825 else 695 else
826#endif 696#endif
827 writel(0, priv->dev->base_addr + DMA_INTR_ENA); 697 priv->hw->dma->disable_dma_irq(priv->dev->base_addr);
828} 698}
829 699
830static int stmmac_has_work(struct stmmac_priv *priv) 700static int stmmac_has_work(struct stmmac_priv *priv)
@@ -832,7 +702,7 @@ static int stmmac_has_work(struct stmmac_priv *priv)
832 unsigned int has_work = 0; 702 unsigned int has_work = 0;
833 int rxret, tx_work = 0; 703 int rxret, tx_work = 0;
834 704
835 rxret = priv->mac_type->ops->get_rx_owner(priv->dma_rx + 705 rxret = priv->hw->desc->get_rx_owner(priv->dma_rx +
836 (priv->cur_rx % priv->dma_rx_size)); 706 (priv->cur_rx % priv->dma_rx_size));
837 707
838 if (priv->dirty_tx != priv->cur_tx) 708 if (priv->dirty_tx != priv->cur_tx)
@@ -883,12 +753,12 @@ static void stmmac_tx_err(struct stmmac_priv *priv)
883{ 753{
884 netif_stop_queue(priv->dev); 754 netif_stop_queue(priv->dev);
885 755
886 stmmac_dma_stop_tx(priv->dev->base_addr); 756 priv->hw->dma->stop_tx(priv->dev->base_addr);
887 dma_free_tx_skbufs(priv); 757 dma_free_tx_skbufs(priv);
888 priv->mac_type->ops->init_tx_desc(priv->dma_tx, priv->dma_tx_size); 758 priv->hw->desc->init_tx_desc(priv->dma_tx, priv->dma_tx_size);
889 priv->dirty_tx = 0; 759 priv->dirty_tx = 0;
890 priv->cur_tx = 0; 760 priv->cur_tx = 0;
891 stmmac_dma_start_tx(priv->dev->base_addr); 761 priv->hw->dma->start_tx(priv->dev->base_addr);
892 762
893 priv->dev->stats.tx_errors++; 763 priv->dev->stats.tx_errors++;
894 netif_wake_queue(priv->dev); 764 netif_wake_queue(priv->dev);
@@ -896,95 +766,27 @@ static void stmmac_tx_err(struct stmmac_priv *priv)
896 return; 766 return;
897} 767}
898 768
899/**
900 * stmmac_dma_interrupt - Interrupt handler for the driver
901 * @dev: net device structure
902 * Description: Interrupt handler for the driver (DMA).
903 */
904static void stmmac_dma_interrupt(struct net_device *dev)
905{
906 unsigned long ioaddr = dev->base_addr;
907 struct stmmac_priv *priv = netdev_priv(dev);
908 /* read the status register (CSR5) */
909 u32 intr_status = readl(ioaddr + DMA_STATUS);
910
911 DBG(intr, INFO, "%s: [CSR5: 0x%08x]\n", __func__, intr_status);
912 769
913#ifdef STMMAC_DEBUG 770static void stmmac_dma_interrupt(struct stmmac_priv *priv)
914 /* It displays the DMA transmit process state (CSR5 register) */ 771{
915 if (netif_msg_tx_done(priv)) 772 unsigned long ioaddr = priv->dev->base_addr;
916 show_tx_process_state(intr_status); 773 int status;
917 if (netif_msg_rx_status(priv)) 774
918 show_rx_process_state(intr_status); 775 status = priv->hw->dma->dma_interrupt(priv->dev->base_addr,
919#endif 776 &priv->xstats);
920 /* ABNORMAL interrupts */ 777 if (likely(status == handle_tx_rx))
921 if (unlikely(intr_status & DMA_STATUS_AIS)) { 778 _stmmac_schedule(priv);
922 DBG(intr, INFO, "CSR5[15] DMA ABNORMAL IRQ: "); 779
923 if (unlikely(intr_status & DMA_STATUS_UNF)) { 780 else if (unlikely(status == tx_hard_error_bump_tc)) {
924 DBG(intr, INFO, "transmit underflow\n"); 781 /* Try to bump up the dma threshold on this failure */
925 if (unlikely(tc != SF_DMA_MODE) && (tc <= 256)) { 782 if (unlikely(tc != SF_DMA_MODE) && (tc <= 256)) {
926 /* Try to bump up the threshold */ 783 tc += 64;
927 tc += 64; 784 priv->hw->dma->dma_mode(ioaddr, tc, SF_DMA_MODE);
928 priv->mac_type->ops->dma_mode(ioaddr, tc, 785 priv->xstats.threshold = tc;
929 SF_DMA_MODE);
930 priv->xstats.threshold = tc;
931 }
932 stmmac_tx_err(priv);
933 priv->xstats.tx_undeflow_irq++;
934 }
935 if (unlikely(intr_status & DMA_STATUS_TJT)) {
936 DBG(intr, INFO, "transmit jabber\n");
937 priv->xstats.tx_jabber_irq++;
938 }
939 if (unlikely(intr_status & DMA_STATUS_OVF)) {
940 DBG(intr, INFO, "recv overflow\n");
941 priv->xstats.rx_overflow_irq++;
942 }
943 if (unlikely(intr_status & DMA_STATUS_RU)) {
944 DBG(intr, INFO, "receive buffer unavailable\n");
945 priv->xstats.rx_buf_unav_irq++;
946 }
947 if (unlikely(intr_status & DMA_STATUS_RPS)) {
948 DBG(intr, INFO, "receive process stopped\n");
949 priv->xstats.rx_process_stopped_irq++;
950 }
951 if (unlikely(intr_status & DMA_STATUS_RWT)) {
952 DBG(intr, INFO, "receive watchdog\n");
953 priv->xstats.rx_watchdog_irq++;
954 }
955 if (unlikely(intr_status & DMA_STATUS_ETI)) {
956 DBG(intr, INFO, "transmit early interrupt\n");
957 priv->xstats.tx_early_irq++;
958 }
959 if (unlikely(intr_status & DMA_STATUS_TPS)) {
960 DBG(intr, INFO, "transmit process stopped\n");
961 priv->xstats.tx_process_stopped_irq++;
962 stmmac_tx_err(priv);
963 }
964 if (unlikely(intr_status & DMA_STATUS_FBI)) {
965 DBG(intr, INFO, "fatal bus error\n");
966 priv->xstats.fatal_bus_error_irq++;
967 stmmac_tx_err(priv);
968 } 786 }
969 } 787 stmmac_tx_err(priv);
970 788 } else if (unlikely(status == tx_hard_error))
971 /* TX/RX NORMAL interrupts */ 789 stmmac_tx_err(priv);
972 if (intr_status & DMA_STATUS_NIS) {
973 priv->xstats.normal_irq_n++;
974 if (likely((intr_status & DMA_STATUS_RI) ||
975 (intr_status & (DMA_STATUS_TI))))
976 _stmmac_schedule(priv);
977 }
978
979 /* Optional hardware blocks, interrupts should be disabled */
980 if (unlikely(intr_status &
981 (DMA_STATUS_GPI | DMA_STATUS_GMI | DMA_STATUS_GLI)))
982 pr_info("%s: unexpected status %08x\n", __func__, intr_status);
983
984 /* Clear the interrupt by writing a logic 1 to the CSR5[15-0] */
985 writel((intr_status & 0x1ffff), ioaddr + DMA_STATUS);
986
987 DBG(intr, INFO, "\n\n");
988 790
989 return; 791 return;
990} 792}
@@ -1058,17 +860,20 @@ static int stmmac_open(struct net_device *dev)
1058 init_dma_desc_rings(dev); 860 init_dma_desc_rings(dev);
1059 861
1060 /* DMA initialization and SW reset */ 862 /* DMA initialization and SW reset */
1061 if (unlikely(priv->mac_type->ops->dma_init(ioaddr, 863 if (unlikely(priv->hw->dma->init(ioaddr, priv->pbl, priv->dma_tx_phy,
1062 priv->pbl, priv->dma_tx_phy, priv->dma_rx_phy) < 0)) { 864 priv->dma_rx_phy) < 0)) {
1063 865
1064 pr_err("%s: DMA initialization failed\n", __func__); 866 pr_err("%s: DMA initialization failed\n", __func__);
1065 return -1; 867 return -1;
1066 } 868 }
1067 869
1068 /* Copy the MAC addr into the HW */ 870 /* Copy the MAC addr into the HW */
1069 priv->mac_type->ops->set_umac_addr(ioaddr, dev->dev_addr, 0); 871 priv->hw->mac->set_umac_addr(ioaddr, dev->dev_addr, 0);
872 /* If required, perform hw setup of the bus. */
873 if (priv->bus_setup)
874 priv->bus_setup(ioaddr);
1070 /* Initialize the MAC Core */ 875 /* Initialize the MAC Core */
1071 priv->mac_type->ops->core_init(ioaddr); 876 priv->hw->mac->core_init(ioaddr);
1072 877
1073 priv->shutdown = 0; 878 priv->shutdown = 0;
1074 879
@@ -1089,16 +894,16 @@ static int stmmac_open(struct net_device *dev)
1089 894
1090 /* Start the ball rolling... */ 895 /* Start the ball rolling... */
1091 DBG(probe, DEBUG, "%s: DMA RX/TX processes started...\n", dev->name); 896 DBG(probe, DEBUG, "%s: DMA RX/TX processes started...\n", dev->name);
1092 stmmac_dma_start_tx(ioaddr); 897 priv->hw->dma->start_tx(ioaddr);
1093 stmmac_dma_start_rx(ioaddr); 898 priv->hw->dma->start_rx(ioaddr);
1094 899
1095#ifdef CONFIG_STMMAC_TIMER 900#ifdef CONFIG_STMMAC_TIMER
1096 priv->tm->timer_start(tmrate); 901 priv->tm->timer_start(tmrate);
1097#endif 902#endif
1098 /* Dump DMA/MAC registers */ 903 /* Dump DMA/MAC registers */
1099 if (netif_msg_hw(priv)) { 904 if (netif_msg_hw(priv)) {
1100 priv->mac_type->ops->dump_mac_regs(ioaddr); 905 priv->hw->mac->dump_regs(ioaddr);
1101 priv->mac_type->ops->dump_dma_regs(ioaddr); 906 priv->hw->dma->dump_regs(ioaddr);
1102 } 907 }
1103 908
1104 if (priv->phydev) 909 if (priv->phydev)
@@ -1142,8 +947,8 @@ static int stmmac_release(struct net_device *dev)
1142 free_irq(dev->irq, dev); 947 free_irq(dev->irq, dev);
1143 948
1144 /* Stop TX/RX DMA and clear the descriptors */ 949 /* Stop TX/RX DMA and clear the descriptors */
1145 stmmac_dma_stop_tx(dev->base_addr); 950 priv->hw->dma->stop_tx(dev->base_addr);
1146 stmmac_dma_stop_rx(dev->base_addr); 951 priv->hw->dma->stop_rx(dev->base_addr);
1147 952
1148 /* Release and free the Rx/Tx resources */ 953 /* Release and free the Rx/Tx resources */
1149 free_dma_desc_resources(priv); 954 free_dma_desc_resources(priv);
@@ -1214,8 +1019,8 @@ static unsigned int stmmac_handle_jumbo_frames(struct sk_buff *skb,
1214 desc->des2 = dma_map_single(priv->device, skb->data, 1019 desc->des2 = dma_map_single(priv->device, skb->data,
1215 BUF_SIZE_8KiB, DMA_TO_DEVICE); 1020 BUF_SIZE_8KiB, DMA_TO_DEVICE);
1216 desc->des3 = desc->des2 + BUF_SIZE_4KiB; 1021 desc->des3 = desc->des2 + BUF_SIZE_4KiB;
1217 priv->mac_type->ops->prepare_tx_desc(desc, 1, BUF_SIZE_8KiB, 1022 priv->hw->desc->prepare_tx_desc(desc, 1, BUF_SIZE_8KiB,
1218 csum_insertion); 1023 csum_insertion);
1219 1024
1220 entry = (++priv->cur_tx) % txsize; 1025 entry = (++priv->cur_tx) % txsize;
1221 desc = priv->dma_tx + entry; 1026 desc = priv->dma_tx + entry;
@@ -1224,16 +1029,16 @@ static unsigned int stmmac_handle_jumbo_frames(struct sk_buff *skb,
1224 skb->data + BUF_SIZE_8KiB, 1029 skb->data + BUF_SIZE_8KiB,
1225 buf2_size, DMA_TO_DEVICE); 1030 buf2_size, DMA_TO_DEVICE);
1226 desc->des3 = desc->des2 + BUF_SIZE_4KiB; 1031 desc->des3 = desc->des2 + BUF_SIZE_4KiB;
1227 priv->mac_type->ops->prepare_tx_desc(desc, 0, 1032 priv->hw->desc->prepare_tx_desc(desc, 0, buf2_size,
1228 buf2_size, csum_insertion); 1033 csum_insertion);
1229 priv->mac_type->ops->set_tx_owner(desc); 1034 priv->hw->desc->set_tx_owner(desc);
1230 priv->tx_skbuff[entry] = NULL; 1035 priv->tx_skbuff[entry] = NULL;
1231 } else { 1036 } else {
1232 desc->des2 = dma_map_single(priv->device, skb->data, 1037 desc->des2 = dma_map_single(priv->device, skb->data,
1233 nopaged_len, DMA_TO_DEVICE); 1038 nopaged_len, DMA_TO_DEVICE);
1234 desc->des3 = desc->des2 + BUF_SIZE_4KiB; 1039 desc->des3 = desc->des2 + BUF_SIZE_4KiB;
1235 priv->mac_type->ops->prepare_tx_desc(desc, 1, nopaged_len, 1040 priv->hw->desc->prepare_tx_desc(desc, 1, nopaged_len,
1236 csum_insertion); 1041 csum_insertion);
1237 } 1042 }
1238 return entry; 1043 return entry;
1239} 1044}
@@ -1301,8 +1106,8 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
1301 unsigned int nopaged_len = skb_headlen(skb); 1106 unsigned int nopaged_len = skb_headlen(skb);
1302 desc->des2 = dma_map_single(priv->device, skb->data, 1107 desc->des2 = dma_map_single(priv->device, skb->data,
1303 nopaged_len, DMA_TO_DEVICE); 1108 nopaged_len, DMA_TO_DEVICE);
1304 priv->mac_type->ops->prepare_tx_desc(desc, 1, nopaged_len, 1109 priv->hw->desc->prepare_tx_desc(desc, 1, nopaged_len,
1305 csum_insertion); 1110 csum_insertion);
1306 } 1111 }
1307 1112
1308 for (i = 0; i < nfrags; i++) { 1113 for (i = 0; i < nfrags; i++) {
@@ -1317,21 +1122,20 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
1317 frag->page_offset, 1122 frag->page_offset,
1318 len, DMA_TO_DEVICE); 1123 len, DMA_TO_DEVICE);
1319 priv->tx_skbuff[entry] = NULL; 1124 priv->tx_skbuff[entry] = NULL;
1320 priv->mac_type->ops->prepare_tx_desc(desc, 0, len, 1125 priv->hw->desc->prepare_tx_desc(desc, 0, len, csum_insertion);
1321 csum_insertion); 1126 priv->hw->desc->set_tx_owner(desc);
1322 priv->mac_type->ops->set_tx_owner(desc);
1323 } 1127 }
1324 1128
1325 /* Interrupt on completition only for the latest segment */ 1129 /* Interrupt on completition only for the latest segment */
1326 priv->mac_type->ops->close_tx_desc(desc); 1130 priv->hw->desc->close_tx_desc(desc);
1327 1131
1328#ifdef CONFIG_STMMAC_TIMER 1132#ifdef CONFIG_STMMAC_TIMER
1329 /* Clean IC while using timer */ 1133 /* Clean IC while using timer */
1330 if (likely(priv->tm->enable)) 1134 if (likely(priv->tm->enable))
1331 priv->mac_type->ops->clear_tx_ic(desc); 1135 priv->hw->desc->clear_tx_ic(desc);
1332#endif 1136#endif
1333 /* To avoid raise condition */ 1137 /* To avoid raise condition */
1334 priv->mac_type->ops->set_tx_owner(first); 1138 priv->hw->desc->set_tx_owner(first);
1335 1139
1336 priv->cur_tx++; 1140 priv->cur_tx++;
1337 1141
@@ -1353,8 +1157,7 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
1353 1157
1354 dev->stats.tx_bytes += skb->len; 1158 dev->stats.tx_bytes += skb->len;
1355 1159
1356 /* CSR1 enables the transmit DMA to check for new descriptor */ 1160 priv->hw->dma->enable_dma_transmission(dev->base_addr);
1357 writel(1, dev->base_addr + DMA_XMT_POLL_DEMAND);
1358 1161
1359 return NETDEV_TX_OK; 1162 return NETDEV_TX_OK;
1360} 1163}
@@ -1391,7 +1194,7 @@ static inline void stmmac_rx_refill(struct stmmac_priv *priv)
1391 } 1194 }
1392 RX_DBG(KERN_INFO "\trefill entry #%d\n", entry); 1195 RX_DBG(KERN_INFO "\trefill entry #%d\n", entry);
1393 } 1196 }
1394 priv->mac_type->ops->set_rx_owner(p + entry); 1197 priv->hw->desc->set_rx_owner(p + entry);
1395 } 1198 }
1396 return; 1199 return;
1397} 1200}
@@ -1412,7 +1215,7 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit)
1412 } 1215 }
1413#endif 1216#endif
1414 count = 0; 1217 count = 0;
1415 while (!priv->mac_type->ops->get_rx_owner(p)) { 1218 while (!priv->hw->desc->get_rx_owner(p)) {
1416 int status; 1219 int status;
1417 1220
1418 if (count >= limit) 1221 if (count >= limit)
@@ -1425,15 +1228,14 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit)
1425 prefetch(p_next); 1228 prefetch(p_next);
1426 1229
1427 /* read the status of the incoming frame */ 1230 /* read the status of the incoming frame */
1428 status = (priv->mac_type->ops->rx_status(&priv->dev->stats, 1231 status = (priv->hw->desc->rx_status(&priv->dev->stats,
1429 &priv->xstats, p)); 1232 &priv->xstats, p));
1430 if (unlikely(status == discard_frame)) 1233 if (unlikely(status == discard_frame))
1431 priv->dev->stats.rx_errors++; 1234 priv->dev->stats.rx_errors++;
1432 else { 1235 else {
1433 struct sk_buff *skb; 1236 struct sk_buff *skb;
1434 /* Length should omit the CRC */ 1237 /* Length should omit the CRC */
1435 int frame_len = 1238 int frame_len = priv->hw->desc->get_rx_frame_len(p) - 4;
1436 priv->mac_type->ops->get_rx_frame_len(p) - 4;
1437 1239
1438#ifdef STMMAC_RX_DEBUG 1240#ifdef STMMAC_RX_DEBUG
1439 if (frame_len > ETH_FRAME_LEN) 1241 if (frame_len > ETH_FRAME_LEN)
@@ -1569,7 +1371,7 @@ static void stmmac_multicast_list(struct net_device *dev)
1569 struct stmmac_priv *priv = netdev_priv(dev); 1371 struct stmmac_priv *priv = netdev_priv(dev);
1570 1372
1571 spin_lock(&priv->lock); 1373 spin_lock(&priv->lock);
1572 priv->mac_type->ops->set_filter(dev); 1374 priv->hw->mac->set_filter(dev);
1573 spin_unlock(&priv->lock); 1375 spin_unlock(&priv->lock);
1574 return; 1376 return;
1575} 1377}
@@ -1623,9 +1425,10 @@ static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
1623 if (priv->is_gmac) { 1425 if (priv->is_gmac) {
1624 unsigned long ioaddr = dev->base_addr; 1426 unsigned long ioaddr = dev->base_addr;
1625 /* To handle GMAC own interrupts */ 1427 /* To handle GMAC own interrupts */
1626 priv->mac_type->ops->host_irq_status(ioaddr); 1428 priv->hw->mac->host_irq_status(ioaddr);
1627 } 1429 }
1628 stmmac_dma_interrupt(dev); 1430
1431 stmmac_dma_interrupt(priv);
1629 1432
1630 return IRQ_HANDLED; 1433 return IRQ_HANDLED;
1631} 1434}
@@ -1744,7 +1547,7 @@ static int stmmac_probe(struct net_device *dev)
1744 netif_napi_add(dev, &priv->napi, stmmac_poll, 64); 1547 netif_napi_add(dev, &priv->napi, stmmac_poll, 64);
1745 1548
1746 /* Get the MAC address */ 1549 /* Get the MAC address */
1747 priv->mac_type->ops->get_umac_addr(dev->base_addr, dev->dev_addr, 0); 1550 priv->hw->mac->get_umac_addr(dev->base_addr, dev->dev_addr, 0);
1748 1551
1749 if (!is_valid_ether_addr(dev->dev_addr)) 1552 if (!is_valid_ether_addr(dev->dev_addr))
1750 pr_warning("\tno valid MAC address;" 1553 pr_warning("\tno valid MAC address;"
@@ -1779,16 +1582,16 @@ static int stmmac_mac_device_setup(struct net_device *dev)
1779 struct mac_device_info *device; 1582 struct mac_device_info *device;
1780 1583
1781 if (priv->is_gmac) 1584 if (priv->is_gmac)
1782 device = gmac_setup(ioaddr); 1585 device = dwmac1000_setup(ioaddr);
1783 else 1586 else
1784 device = mac100_setup(ioaddr); 1587 device = dwmac100_setup(ioaddr);
1785 1588
1786 if (!device) 1589 if (!device)
1787 return -ENOMEM; 1590 return -ENOMEM;
1788 1591
1789 priv->mac_type = device; 1592 priv->hw = device;
1790 1593
1791 priv->wolenabled = priv->mac_type->hw.pmt; /* PMT supported */ 1594 priv->wolenabled = priv->hw->pmt; /* PMT supported */
1792 if (priv->wolenabled == PMT_SUPPORTED) 1595 if (priv->wolenabled == PMT_SUPPORTED)
1793 priv->wolopts = WAKE_MAGIC; /* Magic Frame */ 1596 priv->wolopts = WAKE_MAGIC; /* Magic Frame */
1794 1597
@@ -1797,8 +1600,7 @@ static int stmmac_mac_device_setup(struct net_device *dev)
1797 1600
1798static int stmmacphy_dvr_probe(struct platform_device *pdev) 1601static int stmmacphy_dvr_probe(struct platform_device *pdev)
1799{ 1602{
1800 struct plat_stmmacphy_data *plat_dat; 1603 struct plat_stmmacphy_data *plat_dat = pdev->dev.platform_data;
1801 plat_dat = (struct plat_stmmacphy_data *)((pdev->dev).platform_data);
1802 1604
1803 pr_debug("stmmacphy_dvr_probe: added phy for bus %d\n", 1605 pr_debug("stmmacphy_dvr_probe: added phy for bus %d\n",
1804 plat_dat->bus_id); 1606 plat_dat->bus_id);
@@ -1830,9 +1632,7 @@ static struct platform_driver stmmacphy_driver = {
1830static int stmmac_associate_phy(struct device *dev, void *data) 1632static int stmmac_associate_phy(struct device *dev, void *data)
1831{ 1633{
1832 struct stmmac_priv *priv = (struct stmmac_priv *)data; 1634 struct stmmac_priv *priv = (struct stmmac_priv *)data;
1833 struct plat_stmmacphy_data *plat_dat; 1635 struct plat_stmmacphy_data *plat_dat = dev->platform_data;
1834
1835 plat_dat = (struct plat_stmmacphy_data *)(dev->platform_data);
1836 1636
1837 DBG(probe, DEBUG, "%s: checking phy for bus %d\n", __func__, 1637 DBG(probe, DEBUG, "%s: checking phy for bus %d\n", __func__,
1838 plat_dat->bus_id); 1638 plat_dat->bus_id);
@@ -1922,7 +1722,7 @@ static int stmmac_dvr_probe(struct platform_device *pdev)
1922 priv = netdev_priv(ndev); 1722 priv = netdev_priv(ndev);
1923 priv->device = &(pdev->dev); 1723 priv->device = &(pdev->dev);
1924 priv->dev = ndev; 1724 priv->dev = ndev;
1925 plat_dat = (struct plat_stmmacenet_data *)((pdev->dev).platform_data); 1725 plat_dat = pdev->dev.platform_data;
1926 priv->bus_id = plat_dat->bus_id; 1726 priv->bus_id = plat_dat->bus_id;
1927 priv->pbl = plat_dat->pbl; /* TLI */ 1727 priv->pbl = plat_dat->pbl; /* TLI */
1928 priv->is_gmac = plat_dat->has_gmac; /* GMAC is on board */ 1728 priv->is_gmac = plat_dat->has_gmac; /* GMAC is on board */
@@ -1932,6 +1732,11 @@ static int stmmac_dvr_probe(struct platform_device *pdev)
1932 /* Set the I/O base addr */ 1732 /* Set the I/O base addr */
1933 ndev->base_addr = (unsigned long)addr; 1733 ndev->base_addr = (unsigned long)addr;
1934 1734
1735 /* Verify embedded resource for the platform */
1736 ret = stmmac_claim_resource(pdev);
1737 if (ret < 0)
1738 goto out;
1739
1935 /* MAC HW device detection */ 1740 /* MAC HW device detection */
1936 ret = stmmac_mac_device_setup(ndev); 1741 ret = stmmac_mac_device_setup(ndev);
1937 if (ret < 0) 1742 if (ret < 0)
@@ -1952,6 +1757,7 @@ static int stmmac_dvr_probe(struct platform_device *pdev)
1952 } 1757 }
1953 1758
1954 priv->fix_mac_speed = plat_dat->fix_mac_speed; 1759 priv->fix_mac_speed = plat_dat->fix_mac_speed;
1760 priv->bus_setup = plat_dat->bus_setup;
1955 priv->bsp_priv = plat_dat->bsp_priv; 1761 priv->bsp_priv = plat_dat->bsp_priv;
1956 1762
1957 pr_info("\t%s - (dev. name: %s - id: %d, IRQ #%d\n" 1763 pr_info("\t%s - (dev. name: %s - id: %d, IRQ #%d\n"
@@ -1986,12 +1792,13 @@ out:
1986static int stmmac_dvr_remove(struct platform_device *pdev) 1792static int stmmac_dvr_remove(struct platform_device *pdev)
1987{ 1793{
1988 struct net_device *ndev = platform_get_drvdata(pdev); 1794 struct net_device *ndev = platform_get_drvdata(pdev);
1795 struct stmmac_priv *priv = netdev_priv(ndev);
1989 struct resource *res; 1796 struct resource *res;
1990 1797
1991 pr_info("%s:\n\tremoving driver", __func__); 1798 pr_info("%s:\n\tremoving driver", __func__);
1992 1799
1993 stmmac_dma_stop_rx(ndev->base_addr); 1800 priv->hw->dma->stop_rx(ndev->base_addr);
1994 stmmac_dma_stop_tx(ndev->base_addr); 1801 priv->hw->dma->stop_tx(ndev->base_addr);
1995 1802
1996 stmmac_mac_disable_rx(ndev->base_addr); 1803 stmmac_mac_disable_rx(ndev->base_addr);
1997 stmmac_mac_disable_tx(ndev->base_addr); 1804 stmmac_mac_disable_tx(ndev->base_addr);
@@ -2038,21 +1845,20 @@ static int stmmac_suspend(struct platform_device *pdev, pm_message_t state)
2038 napi_disable(&priv->napi); 1845 napi_disable(&priv->napi);
2039 1846
2040 /* Stop TX/RX DMA */ 1847 /* Stop TX/RX DMA */
2041 stmmac_dma_stop_tx(dev->base_addr); 1848 priv->hw->dma->stop_tx(dev->base_addr);
2042 stmmac_dma_stop_rx(dev->base_addr); 1849 priv->hw->dma->stop_rx(dev->base_addr);
2043 /* Clear the Rx/Tx descriptors */ 1850 /* Clear the Rx/Tx descriptors */
2044 priv->mac_type->ops->init_rx_desc(priv->dma_rx, 1851 priv->hw->desc->init_rx_desc(priv->dma_rx, priv->dma_rx_size,
2045 priv->dma_rx_size, dis_ic); 1852 dis_ic);
2046 priv->mac_type->ops->init_tx_desc(priv->dma_tx, 1853 priv->hw->desc->init_tx_desc(priv->dma_tx, priv->dma_tx_size);
2047 priv->dma_tx_size);
2048 1854
2049 stmmac_mac_disable_tx(dev->base_addr); 1855 stmmac_mac_disable_tx(dev->base_addr);
2050 1856
2051 if (device_may_wakeup(&(pdev->dev))) { 1857 if (device_may_wakeup(&(pdev->dev))) {
2052 /* Enable Power down mode by programming the PMT regs */ 1858 /* Enable Power down mode by programming the PMT regs */
2053 if (priv->wolenabled == PMT_SUPPORTED) 1859 if (priv->wolenabled == PMT_SUPPORTED)
2054 priv->mac_type->ops->pmt(dev->base_addr, 1860 priv->hw->mac->pmt(dev->base_addr,
2055 priv->wolopts); 1861 priv->wolopts);
2056 } else { 1862 } else {
2057 stmmac_mac_disable_rx(dev->base_addr); 1863 stmmac_mac_disable_rx(dev->base_addr);
2058 } 1864 }
@@ -2093,15 +1899,15 @@ static int stmmac_resume(struct platform_device *pdev)
2093 * from other devices (e.g. serial console). */ 1899 * from other devices (e.g. serial console). */
2094 if (device_may_wakeup(&(pdev->dev))) 1900 if (device_may_wakeup(&(pdev->dev)))
2095 if (priv->wolenabled == PMT_SUPPORTED) 1901 if (priv->wolenabled == PMT_SUPPORTED)
2096 priv->mac_type->ops->pmt(dev->base_addr, 0); 1902 priv->hw->mac->pmt(dev->base_addr, 0);
2097 1903
2098 netif_device_attach(dev); 1904 netif_device_attach(dev);
2099 1905
2100 /* Enable the MAC and DMA */ 1906 /* Enable the MAC and DMA */
2101 stmmac_mac_enable_rx(ioaddr); 1907 stmmac_mac_enable_rx(ioaddr);
2102 stmmac_mac_enable_tx(ioaddr); 1908 stmmac_mac_enable_tx(ioaddr);
2103 stmmac_dma_start_tx(ioaddr); 1909 priv->hw->dma->start_tx(ioaddr);
2104 stmmac_dma_start_rx(ioaddr); 1910 priv->hw->dma->start_rx(ioaddr);
2105 1911
2106#ifdef CONFIG_STMMAC_TIMER 1912#ifdef CONFIG_STMMAC_TIMER
2107 priv->tm->timer_start(tmrate); 1913 priv->tm->timer_start(tmrate);
diff --git a/drivers/net/stmmac/stmmac_mdio.c b/drivers/net/stmmac/stmmac_mdio.c
index 8498552a22f..fffe1d037fe 100644
--- a/drivers/net/stmmac/stmmac_mdio.c
+++ b/drivers/net/stmmac/stmmac_mdio.c
@@ -24,7 +24,6 @@
24 Maintainer: Giuseppe Cavallaro <peppe.cavallaro@st.com> 24 Maintainer: Giuseppe Cavallaro <peppe.cavallaro@st.com>
25*******************************************************************************/ 25*******************************************************************************/
26 26
27#include <linux/netdevice.h>
28#include <linux/mii.h> 27#include <linux/mii.h>
29#include <linux/phy.h> 28#include <linux/phy.h>
30 29
@@ -48,8 +47,8 @@ static int stmmac_mdio_read(struct mii_bus *bus, int phyaddr, int phyreg)
48 struct net_device *ndev = bus->priv; 47 struct net_device *ndev = bus->priv;
49 struct stmmac_priv *priv = netdev_priv(ndev); 48 struct stmmac_priv *priv = netdev_priv(ndev);
50 unsigned long ioaddr = ndev->base_addr; 49 unsigned long ioaddr = ndev->base_addr;
51 unsigned int mii_address = priv->mac_type->hw.mii.addr; 50 unsigned int mii_address = priv->hw->mii.addr;
52 unsigned int mii_data = priv->mac_type->hw.mii.data; 51 unsigned int mii_data = priv->hw->mii.data;
53 52
54 int data; 53 int data;
55 u16 regValue = (((phyaddr << 11) & (0x0000F800)) | 54 u16 regValue = (((phyaddr << 11) & (0x0000F800)) |
@@ -80,8 +79,8 @@ static int stmmac_mdio_write(struct mii_bus *bus, int phyaddr, int phyreg,
80 struct net_device *ndev = bus->priv; 79 struct net_device *ndev = bus->priv;
81 struct stmmac_priv *priv = netdev_priv(ndev); 80 struct stmmac_priv *priv = netdev_priv(ndev);
82 unsigned long ioaddr = ndev->base_addr; 81 unsigned long ioaddr = ndev->base_addr;
83 unsigned int mii_address = priv->mac_type->hw.mii.addr; 82 unsigned int mii_address = priv->hw->mii.addr;
84 unsigned int mii_data = priv->mac_type->hw.mii.data; 83 unsigned int mii_data = priv->hw->mii.data;
85 84
86 u16 value = 85 u16 value =
87 (((phyaddr << 11) & (0x0000F800)) | ((phyreg << 6) & (0x000007C0))) 86 (((phyaddr << 11) & (0x0000F800)) | ((phyreg << 6) & (0x000007C0)))
@@ -112,7 +111,7 @@ static int stmmac_mdio_reset(struct mii_bus *bus)
112 struct net_device *ndev = bus->priv; 111 struct net_device *ndev = bus->priv;
113 struct stmmac_priv *priv = netdev_priv(ndev); 112 struct stmmac_priv *priv = netdev_priv(ndev);
114 unsigned long ioaddr = ndev->base_addr; 113 unsigned long ioaddr = ndev->base_addr;
115 unsigned int mii_address = priv->mac_type->hw.mii.addr; 114 unsigned int mii_address = priv->hw->mii.addr;
116 115
117 if (priv->phy_reset) { 116 if (priv->phy_reset) {
118 pr_debug("stmmac_mdio_reset: calling phy_reset\n"); 117 pr_debug("stmmac_mdio_reset: calling phy_reset\n");
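The stmmac hunks above replace every priv->mac_type->ops-> and priv->mac_type->hw. access with per-area operation tables reached through priv->hw (desc, dma, mac, mii). The sketch below only shows the shape of that indirection; beyond the call sites visible in the diff, the member lists and type names are illustrative assumptions, not the driver's actual headers.

struct dma_desc;                              /* hardware descriptor, opaque here */

struct stmmac_desc_ops {                      /* descriptor helpers (priv->hw->desc) */
	void (*set_tx_owner)(struct dma_desc *p);
	void (*set_rx_owner)(struct dma_desc *p);
	int  (*get_rx_owner)(struct dma_desc *p);
};

struct stmmac_dma_ops {                       /* DMA engine helpers (priv->hw->dma) */
	void (*start_tx)(unsigned long ioaddr);
	void (*stop_tx)(unsigned long ioaddr);
	void (*enable_dma_transmission)(unsigned long ioaddr);
};

struct mii_regs {                             /* MII register offsets (priv->hw->mii) */
	unsigned int addr;
	unsigned int data;
};

struct mac_device_info {                      /* what dwmac100_setup()/dwmac1000_setup() return */
	const struct stmmac_desc_ops *desc;
	const struct stmmac_dma_ops *dma;
	struct mii_regs mii;
	unsigned int pmt;                     /* PMT (Wake-on-LAN) support flag */
	/* ... MAC core ops, etc. ... */
};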
diff --git a/drivers/net/sundance.c b/drivers/net/sundance.c
index d58e1891ca6..0c972e560cf 100644
--- a/drivers/net/sundance.c
+++ b/drivers/net/sundance.c
@@ -206,7 +206,7 @@ IVc. Errata
206#define USE_IO_OPS 1 206#define USE_IO_OPS 1
207#endif 207#endif
208 208
209static const struct pci_device_id sundance_pci_tbl[] = { 209static DEFINE_PCI_DEVICE_TABLE(sundance_pci_tbl) = {
210 { 0x1186, 0x1002, 0x1186, 0x1002, 0, 0, 0 }, 210 { 0x1186, 0x1002, 0x1186, 0x1002, 0, 0, 0 },
211 { 0x1186, 0x1002, 0x1186, 0x1003, 0, 0, 1 }, 211 { 0x1186, 0x1002, 0x1186, 0x1003, 0, 0, 1 },
212 { 0x1186, 0x1002, 0x1186, 0x1012, 0, 0, 2 }, 212 { 0x1186, 0x1002, 0x1186, 0x1012, 0, 0, 2 },
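This and many of the following hunks convert open-coded struct pci_device_id arrays to DEFINE_PCI_DEVICE_TABLE. At this point the macro expanded to roughly "const struct pci_device_id name[] __devinitconst", so the conversion mainly adds const and the devinit section annotation. A minimal sketch, with the vendor/device pair taken from the sundance table above and a made-up table name:

#include <linux/module.h>
#include <linux/pci.h>

/* DEFINE_PCI_DEVICE_TABLE(tbl) expands (roughly) to
 *     const struct pci_device_id tbl[] __devinitconst
 * so the table becomes const and lands in the devinit section. */
static DEFINE_PCI_DEVICE_TABLE(example_pci_tbl) = {
	{ 0x1186, 0x1002, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },	/* vendor/device from the table above */
	{ }							/* terminating entry */
};
MODULE_DEVICE_TABLE(pci, example_pci_tbl);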
diff --git a/drivers/net/sungem.c b/drivers/net/sungem.c
index b571a1babab..b55ceb88d93 100644
--- a/drivers/net/sungem.c
+++ b/drivers/net/sungem.c
@@ -107,7 +107,7 @@ MODULE_LICENSE("GPL");
107#define GEM_MODULE_NAME "gem" 107#define GEM_MODULE_NAME "gem"
108#define PFX GEM_MODULE_NAME ": " 108#define PFX GEM_MODULE_NAME ": "
109 109
110static struct pci_device_id gem_pci_tbl[] = { 110static DEFINE_PCI_DEVICE_TABLE(gem_pci_tbl) = {
111 { PCI_VENDOR_ID_SUN, PCI_DEVICE_ID_SUN_GEM, 111 { PCI_VENDOR_ID_SUN, PCI_DEVICE_ID_SUN_GEM,
112 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, 112 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
113 113
diff --git a/drivers/net/sunhme.c b/drivers/net/sunhme.c
index 6762f1c6ec8..76ccd31cbf5 100644
--- a/drivers/net/sunhme.c
+++ b/drivers/net/sunhme.c
@@ -3211,7 +3211,7 @@ static void __devexit happy_meal_pci_remove(struct pci_dev *pdev)
3211 dev_set_drvdata(&pdev->dev, NULL); 3211 dev_set_drvdata(&pdev->dev, NULL);
3212} 3212}
3213 3213
3214static struct pci_device_id happymeal_pci_ids[] = { 3214static DEFINE_PCI_DEVICE_TABLE(happymeal_pci_ids) = {
3215 { PCI_DEVICE(PCI_VENDOR_ID_SUN, PCI_DEVICE_ID_SUN_HAPPYMEAL) }, 3215 { PCI_DEVICE(PCI_VENDOR_ID_SUN, PCI_DEVICE_ID_SUN_HAPPYMEAL) },
3216 { } /* Terminating entry */ 3216 { } /* Terminating entry */
3217}; 3217};
diff --git a/drivers/net/sunvnet.c b/drivers/net/sunvnet.c
index bc74db0d12f..d65764ea1d8 100644
--- a/drivers/net/sunvnet.c
+++ b/drivers/net/sunvnet.c
@@ -1062,10 +1062,7 @@ static struct vnet * __devinit vnet_new(const u64 *local_mac)
1062 goto err_out_free_dev; 1062 goto err_out_free_dev;
1063 } 1063 }
1064 1064
1065 printk(KERN_INFO "%s: Sun LDOM vnet ", dev->name); 1065 printk(KERN_INFO "%s: Sun LDOM vnet %pM\n", dev->name, dev->dev_addr);
1066
1067 for (i = 0; i < 6; i++)
1068 printk("%2.2x%c", dev->dev_addr[i], i == 5 ? '\n' : ':');
1069 1066
1070 list_add(&vp->list, &vnet_list); 1067 list_add(&vp->list, &vnet_list);
1071 1068
diff --git a/drivers/net/tc35815.c b/drivers/net/tc35815.c
index 75a669d48e5..033408f589f 100644
--- a/drivers/net/tc35815.c
+++ b/drivers/net/tc35815.c
@@ -65,7 +65,7 @@ static const struct {
65 { "TOSHIBA TC35815/TX4939" }, 65 { "TOSHIBA TC35815/TX4939" },
66}; 66};
67 67
68static const struct pci_device_id tc35815_pci_tbl[] = { 68static DEFINE_PCI_DEVICE_TABLE(tc35815_pci_tbl) = {
69 {PCI_DEVICE(PCI_VENDOR_ID_TOSHIBA_2, PCI_DEVICE_ID_TOSHIBA_TC35815CF), .driver_data = TC35815CF }, 69 {PCI_DEVICE(PCI_VENDOR_ID_TOSHIBA_2, PCI_DEVICE_ID_TOSHIBA_TC35815CF), .driver_data = TC35815CF },
70 {PCI_DEVICE(PCI_VENDOR_ID_TOSHIBA_2, PCI_DEVICE_ID_TOSHIBA_TC35815_NWU), .driver_data = TC35815_NWU }, 70 {PCI_DEVICE(PCI_VENDOR_ID_TOSHIBA_2, PCI_DEVICE_ID_TOSHIBA_TC35815_NWU), .driver_data = TC35815_NWU },
71 {PCI_DEVICE(PCI_VENDOR_ID_TOSHIBA_2, PCI_DEVICE_ID_TOSHIBA_TC35815_TX4939), .driver_data = TC35815_TX4939 }, 71 {PCI_DEVICE(PCI_VENDOR_ID_TOSHIBA_2, PCI_DEVICE_ID_TOSHIBA_TC35815_TX4939), .driver_data = TC35815_TX4939 },
diff --git a/drivers/net/tehuti.c b/drivers/net/tehuti.c
index 80b404f2b93..b907bee31fd 100644
--- a/drivers/net/tehuti.c
+++ b/drivers/net/tehuti.c
@@ -64,7 +64,7 @@
64 64
65#include "tehuti.h" 65#include "tehuti.h"
66 66
67static struct pci_device_id __devinitdata bdx_pci_tbl[] = { 67static DEFINE_PCI_DEVICE_TABLE(bdx_pci_tbl) = {
68 {0x1FC9, 0x3009, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, 68 {0x1FC9, 0x3009, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
69 {0x1FC9, 0x3010, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, 69 {0x1FC9, 0x3010, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
70 {0x1FC9, 0x3014, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, 70 {0x1FC9, 0x3014, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c
index 7f82b0238e0..7195bdec17f 100644
--- a/drivers/net/tg3.c
+++ b/drivers/net/tg3.c
@@ -174,7 +174,7 @@ static int tg3_debug = -1; /* -1 == use TG3_DEF_MSG_ENABLE as value */
174module_param(tg3_debug, int, 0); 174module_param(tg3_debug, int, 0);
175MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value"); 175MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
176 176
177static struct pci_device_id tg3_pci_tbl[] = { 177static DEFINE_PCI_DEVICE_TABLE(tg3_pci_tbl) = {
178 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)}, 178 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
179 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)}, 179 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
180 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)}, 180 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
@@ -244,6 +244,12 @@ static struct pci_device_id tg3_pci_tbl[] = {
244 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)}, 244 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)},
245 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)}, 245 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)},
246 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5724)}, 246 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5724)},
247 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57781)},
248 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57785)},
249 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57761)},
250 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)},
251 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791)},
252 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795)},
247 {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)}, 253 {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
248 {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)}, 254 {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
249 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)}, 255 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
@@ -1564,7 +1570,9 @@ static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
1564{ 1570{
1565 u32 reg; 1571 u32 reg;
1566 1572
1567 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) 1573 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
1574 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 &&
1575 (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)))
1568 return; 1576 return;
1569 1577
1570 if (tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET) { 1578 if (tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET) {
@@ -1939,6 +1947,10 @@ static int tg3_phy_reset(struct tg3 *tp)
1939 } 1947 }
1940 } 1948 }
1941 1949
1950 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 &&
1951 (tp->tg3_flags2 & TG3_FLG2_MII_SERDES))
1952 return 0;
1953
1942 tg3_phy_apply_otp(tp); 1954 tg3_phy_apply_otp(tp);
1943 1955
1944 if (tp->tg3_flags3 & TG3_FLG3_PHY_ENABLE_APD) 1956 if (tp->tg3_flags3 & TG3_FLG3_PHY_ENABLE_APD)
@@ -2019,7 +2031,9 @@ static void tg3_frob_aux_power(struct tg3 *tp)
2019{ 2031{
2020 struct tg3 *tp_peer = tp; 2032 struct tg3 *tp_peer = tp;
2021 2033
2022 if ((tp->tg3_flags2 & TG3_FLG2_IS_NIC) == 0) 2034 /* The GPIOs do something completely different on 57765. */
2035 if ((tp->tg3_flags2 & TG3_FLG2_IS_NIC) == 0 ||
2036 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
2023 return; 2037 return;
2024 2038
2025 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 || 2039 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
@@ -7439,10 +7453,13 @@ static void tg3_rings_reset(struct tg3 *tp)
7439 for (i = 1; i < TG3_IRQ_MAX_VECS; i++) { 7453 for (i = 1; i < TG3_IRQ_MAX_VECS; i++) {
7440 tp->napi[i].tx_prod = 0; 7454 tp->napi[i].tx_prod = 0;
7441 tp->napi[i].tx_cons = 0; 7455 tp->napi[i].tx_cons = 0;
7442 tw32_mailbox(tp->napi[i].prodmbox, 0); 7456 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS)
7457 tw32_mailbox(tp->napi[i].prodmbox, 0);
7443 tw32_rx_mbox(tp->napi[i].consmbox, 0); 7458 tw32_rx_mbox(tp->napi[i].consmbox, 0);
7444 tw32_mailbox_f(tp->napi[i].int_mbox, 1); 7459 tw32_mailbox_f(tp->napi[i].int_mbox, 1);
7445 } 7460 }
7461 if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS))
7462 tw32_mailbox(tp->napi[0].prodmbox, 0);
7446 } else { 7463 } else {
7447 tp->napi[0].tx_prod = 0; 7464 tp->napi[0].tx_prod = 0;
7448 tp->napi[0].tx_cons = 0; 7465 tp->napi[0].tx_cons = 0;
@@ -7574,6 +7591,20 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
7574 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS); 7591 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
7575 } 7592 }
7576 7593
7594 if (tp->tg3_flags3 & TG3_FLG3_L1PLLPD_EN) {
7595 u32 grc_mode = tr32(GRC_MODE);
7596
7597 /* Access the lower 1K of PL PCIE block registers. */
7598 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
7599 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
7600
7601 val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1);
7602 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1,
7603 val | TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN);
7604
7605 tw32(GRC_MODE, grc_mode);
7606 }
7607
7577 /* This works around an issue with Athlon chipsets on 7608 /* This works around an issue with Athlon chipsets on
7578 * B3 tigon3 silicon. This bit has no effect on any 7609 * B3 tigon3 silicon. This bit has no effect on any
7579 * other revision. But do not set this on PCI Express 7610 * other revision. But do not set this on PCI Express
@@ -7772,7 +7803,7 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
7772 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS, 7803 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
7773 (RX_JUMBO_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT) | 7804 (RX_JUMBO_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT) |
7774 BDINFO_FLAGS_USE_EXT_RECV); 7805 BDINFO_FLAGS_USE_EXT_RECV);
7775 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) 7806 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717)
7776 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR, 7807 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
7777 NIC_SRAM_RX_JUMBO_BUFFER_DESC); 7808 NIC_SRAM_RX_JUMBO_BUFFER_DESC);
7778 } else { 7809 } else {
@@ -8143,7 +8174,11 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
8143 /* Prevent chip from dropping frames when flow control 8174 /* Prevent chip from dropping frames when flow control
8144 * is enabled. 8175 * is enabled.
8145 */ 8176 */
8146 tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, 2); 8177 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
8178 val = 1;
8179 else
8180 val = 2;
8181 tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, val);
8147 8182
8148 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 && 8183 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
8149 (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) { 8184 (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
@@ -10640,12 +10675,27 @@ static int tg3_test_memory(struct tg3 *tp)
10640 { 0x00008000, 0x01000}, 10675 { 0x00008000, 0x01000},
10641 { 0x00010000, 0x01000}, 10676 { 0x00010000, 0x01000},
10642 { 0xffffffff, 0x00000} 10677 { 0xffffffff, 0x00000}
10678 }, mem_tbl_5717[] = {
10679 { 0x00000200, 0x00008},
10680 { 0x00010000, 0x0a000},
10681 { 0x00020000, 0x13c00},
10682 { 0xffffffff, 0x00000}
10683 }, mem_tbl_57765[] = {
10684 { 0x00000200, 0x00008},
10685 { 0x00004000, 0x00800},
10686 { 0x00006000, 0x09800},
10687 { 0x00010000, 0x0a000},
10688 { 0xffffffff, 0x00000}
10643 }; 10689 };
10644 struct mem_entry *mem_tbl; 10690 struct mem_entry *mem_tbl;
10645 int err = 0; 10691 int err = 0;
10646 int i; 10692 int i;
10647 10693
10648 if (tp->tg3_flags3 & TG3_FLG3_5755_PLUS) 10694 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717)
10695 mem_tbl = mem_tbl_5717;
10696 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
10697 mem_tbl = mem_tbl_57765;
10698 else if (tp->tg3_flags3 & TG3_FLG3_5755_PLUS)
10649 mem_tbl = mem_tbl_5755; 10699 mem_tbl = mem_tbl_5755;
10650 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) 10700 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
10651 mem_tbl = mem_tbl_5906; 10701 mem_tbl = mem_tbl_5906;
@@ -13102,6 +13152,8 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
13102 tp->pci_chip_rev_id == CHIPREV_ID_57780_A0 || 13152 tp->pci_chip_rev_id == CHIPREV_ID_57780_A0 ||
13103 tp->pci_chip_rev_id == CHIPREV_ID_57780_A1) 13153 tp->pci_chip_rev_id == CHIPREV_ID_57780_A1)
13104 tp->tg3_flags3 |= TG3_FLG3_CLKREQ_BUG; 13154 tp->tg3_flags3 |= TG3_FLG3_CLKREQ_BUG;
13155 } else if (tp->pci_chip_rev_id == CHIPREV_ID_5717_A0) {
13156 tp->tg3_flags3 |= TG3_FLG3_L1PLLPD_EN;
13105 } 13157 }
13106 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) { 13158 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
13107 tp->tg3_flags2 |= TG3_FLG2_PCI_EXPRESS; 13159 tp->tg3_flags2 |= TG3_FLG2_PCI_EXPRESS;
@@ -13290,7 +13342,8 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
13290 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3; 13342 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
13291 13343
13292 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 || 13344 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
13293 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) 13345 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
13346 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
13294 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL; 13347 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
13295 13348
13296 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 || 13349 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
@@ -14086,9 +14139,22 @@ static void __devinit tg3_init_link_config(struct tg3 *tp)
14086 14139
14087static void __devinit tg3_init_bufmgr_config(struct tg3 *tp) 14140static void __devinit tg3_init_bufmgr_config(struct tg3 *tp)
14088{ 14141{
14089 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS && 14142 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
14090 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 && 14143 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) {
14091 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57765) { 14144 tp->bufmgr_config.mbuf_read_dma_low_water =
14145 DEFAULT_MB_RDMA_LOW_WATER_5705;
14146 tp->bufmgr_config.mbuf_mac_rx_low_water =
14147 DEFAULT_MB_MACRX_LOW_WATER_57765;
14148 tp->bufmgr_config.mbuf_high_water =
14149 DEFAULT_MB_HIGH_WATER_57765;
14150
14151 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
14152 DEFAULT_MB_RDMA_LOW_WATER_5705;
14153 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
14154 DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765;
14155 tp->bufmgr_config.mbuf_high_water_jumbo =
14156 DEFAULT_MB_HIGH_WATER_JUMBO_57765;
14157 } else if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
14092 tp->bufmgr_config.mbuf_read_dma_low_water = 14158 tp->bufmgr_config.mbuf_read_dma_low_water =
14093 DEFAULT_MB_RDMA_LOW_WATER_5705; 14159 DEFAULT_MB_RDMA_LOW_WATER_5705;
14094 tp->bufmgr_config.mbuf_mac_rx_low_water = 14160 tp->bufmgr_config.mbuf_mac_rx_low_water =
@@ -14148,7 +14214,9 @@ static char * __devinit tg3_phy_string(struct tg3 *tp)
14148 case PHY_ID_BCM5756: return "5722/5756"; 14214 case PHY_ID_BCM5756: return "5722/5756";
14149 case PHY_ID_BCM5906: return "5906"; 14215 case PHY_ID_BCM5906: return "5906";
14150 case PHY_ID_BCM5761: return "5761"; 14216 case PHY_ID_BCM5761: return "5761";
14151 case PHY_ID_BCM5717: return "5717"; 14217 case PHY_ID_BCM5718C: return "5718C";
14218 case PHY_ID_BCM5718S: return "5718S";
14219 case PHY_ID_BCM57765: return "57765";
14152 case PHY_ID_BCM8002: return "8002/serdes"; 14220 case PHY_ID_BCM8002: return "8002/serdes";
14153 case 0: return "serdes"; 14221 case 0: return "serdes";
14154 default: return "unknown"; 14222 default: return "unknown";
diff --git a/drivers/net/tg3.h b/drivers/net/tg3.h
index 8a167912902..e7f6214a168 100644
--- a/drivers/net/tg3.h
+++ b/drivers/net/tg3.h
@@ -1206,14 +1206,18 @@
1206#define DEFAULT_MB_MACRX_LOW_WATER 0x00000020 1206#define DEFAULT_MB_MACRX_LOW_WATER 0x00000020
1207#define DEFAULT_MB_MACRX_LOW_WATER_5705 0x00000010 1207#define DEFAULT_MB_MACRX_LOW_WATER_5705 0x00000010
1208#define DEFAULT_MB_MACRX_LOW_WATER_5906 0x00000004 1208#define DEFAULT_MB_MACRX_LOW_WATER_5906 0x00000004
1209#define DEFAULT_MB_MACRX_LOW_WATER_57765 0x0000002a
1209#define DEFAULT_MB_MACRX_LOW_WATER_JUMBO 0x00000098 1210#define DEFAULT_MB_MACRX_LOW_WATER_JUMBO 0x00000098
1210#define DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780 0x0000004b 1211#define DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780 0x0000004b
1212#define DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765 0x0000007e
1211#define BUFMGR_MB_HIGH_WATER 0x00004418 1213#define BUFMGR_MB_HIGH_WATER 0x00004418
1212#define DEFAULT_MB_HIGH_WATER 0x00000060 1214#define DEFAULT_MB_HIGH_WATER 0x00000060
1213#define DEFAULT_MB_HIGH_WATER_5705 0x00000060 1215#define DEFAULT_MB_HIGH_WATER_5705 0x00000060
1214#define DEFAULT_MB_HIGH_WATER_5906 0x00000010 1216#define DEFAULT_MB_HIGH_WATER_5906 0x00000010
1217#define DEFAULT_MB_HIGH_WATER_57765 0x000000a0
1215#define DEFAULT_MB_HIGH_WATER_JUMBO 0x0000017c 1218#define DEFAULT_MB_HIGH_WATER_JUMBO 0x0000017c
1216#define DEFAULT_MB_HIGH_WATER_JUMBO_5780 0x00000096 1219#define DEFAULT_MB_HIGH_WATER_JUMBO_5780 0x00000096
1220#define DEFAULT_MB_HIGH_WATER_JUMBO_57765 0x000000ea
1217#define BUFMGR_RX_MB_ALLOC_REQ 0x0000441c 1221#define BUFMGR_RX_MB_ALLOC_REQ 0x0000441c
1218#define BUFMGR_MB_ALLOC_BIT 0x10000000 1222#define BUFMGR_MB_ALLOC_BIT 0x10000000
1219#define BUFMGR_RX_MB_ALLOC_RESP 0x00004420 1223#define BUFMGR_RX_MB_ALLOC_RESP 0x00004420
@@ -1543,6 +1547,8 @@
1543#define GRC_MODE_HOST_SENDBDS 0x00020000 1547#define GRC_MODE_HOST_SENDBDS 0x00020000
1544#define GRC_MODE_NO_TX_PHDR_CSUM 0x00100000 1548#define GRC_MODE_NO_TX_PHDR_CSUM 0x00100000
1545#define GRC_MODE_NVRAM_WR_ENABLE 0x00200000 1549#define GRC_MODE_NVRAM_WR_ENABLE 0x00200000
1550#define GRC_MODE_PCIE_TL_SEL 0x00000000
1551#define GRC_MODE_PCIE_PL_SEL 0x00400000
1546#define GRC_MODE_NO_RX_PHDR_CSUM 0x00800000 1552#define GRC_MODE_NO_RX_PHDR_CSUM 0x00800000
1547#define GRC_MODE_IRQ_ON_TX_CPU_ATTN 0x01000000 1553#define GRC_MODE_IRQ_ON_TX_CPU_ATTN 0x01000000
1548#define GRC_MODE_IRQ_ON_RX_CPU_ATTN 0x02000000 1554#define GRC_MODE_IRQ_ON_RX_CPU_ATTN 0x02000000
@@ -1550,7 +1556,13 @@
1550#define GRC_MODE_IRQ_ON_DMA_ATTN 0x08000000 1556#define GRC_MODE_IRQ_ON_DMA_ATTN 0x08000000
1551#define GRC_MODE_IRQ_ON_FLOW_ATTN 0x10000000 1557#define GRC_MODE_IRQ_ON_FLOW_ATTN 0x10000000
1552#define GRC_MODE_4X_NIC_SEND_RINGS 0x20000000 1558#define GRC_MODE_4X_NIC_SEND_RINGS 0x20000000
1559#define GRC_MODE_PCIE_DL_SEL 0x20000000
1553#define GRC_MODE_MCAST_FRM_ENABLE 0x40000000 1560#define GRC_MODE_MCAST_FRM_ENABLE 0x40000000
1561#define GRC_MODE_PCIE_HI_1K_EN 0x80000000
1562#define GRC_MODE_PCIE_PORT_MASK (GRC_MODE_PCIE_TL_SEL | \
1563 GRC_MODE_PCIE_PL_SEL | \
1564 GRC_MODE_PCIE_DL_SEL | \
1565 GRC_MODE_PCIE_HI_1K_EN)
1554#define GRC_MISC_CFG 0x00006804 1566#define GRC_MISC_CFG 0x00006804
1555#define GRC_MISC_CFG_CORECLK_RESET 0x00000001 1567#define GRC_MISC_CFG_CORECLK_RESET 0x00000001
1556#define GRC_MISC_CFG_PRESCALAR_MASK 0x000000fe 1568#define GRC_MISC_CFG_PRESCALAR_MASK 0x000000fe
@@ -1804,6 +1816,11 @@
1804/* 0x7e74 --> 0x8000 unused */ 1816/* 0x7e74 --> 0x8000 unused */
1805 1817
1806 1818
1819/* Alternate PCIE definitions */
1820#define TG3_PCIE_TLDLPL_PORT 0x00007c00
1821#define TG3_PCIE_PL_LO_PHYCTL1 0x00000004
1822#define TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN 0x00001000
1823
1807/* OTP bit definitions */ 1824/* OTP bit definitions */
1808#define TG3_OTP_AGCTGT_MASK 0x000000e0 1825#define TG3_OTP_AGCTGT_MASK 0x000000e0
1809#define TG3_OTP_AGCTGT_SHIFT 1 1826#define TG3_OTP_AGCTGT_SHIFT 1
@@ -2812,6 +2829,7 @@ struct tg3 {
2812#define TG3_FLG3_40BIT_DMA_LIMIT_BUG 0x00100000 2829#define TG3_FLG3_40BIT_DMA_LIMIT_BUG 0x00100000
2813#define TG3_FLG3_SHORT_DMA_BUG 0x00200000 2830#define TG3_FLG3_SHORT_DMA_BUG 0x00200000
2814#define TG3_FLG3_USE_JUMBO_BDFLAG 0x00400000 2831#define TG3_FLG3_USE_JUMBO_BDFLAG 0x00400000
2832#define TG3_FLG3_L1PLLPD_EN 0x00800000
2815 2833
2816 struct timer_list timer; 2834 struct timer_list timer;
2817 u16 timer_counter; 2835 u16 timer_counter;
@@ -2878,7 +2896,9 @@ struct tg3 {
2878#define PHY_ID_BCM5756 0xbc050ed0 2896#define PHY_ID_BCM5756 0xbc050ed0
2879#define PHY_ID_BCM5784 0xbc050fa0 2897#define PHY_ID_BCM5784 0xbc050fa0
2880#define PHY_ID_BCM5761 0xbc050fd0 2898#define PHY_ID_BCM5761 0xbc050fd0
2881#define PHY_ID_BCM5717 0x5c0d8a00 2899#define PHY_ID_BCM5718C 0x5c0d8a00
2900#define PHY_ID_BCM5718S 0xbc050ff0
2901#define PHY_ID_BCM57765 0x5c0d8a40
2882#define PHY_ID_BCM5906 0xdc00ac40 2902#define PHY_ID_BCM5906 0xdc00ac40
2883#define PHY_ID_BCM8002 0x60010140 2903#define PHY_ID_BCM8002 0x60010140
2884#define PHY_ID_INVALID 0xffffffff 2904#define PHY_ID_INVALID 0xffffffff
@@ -2921,7 +2941,8 @@ struct tg3 {
2921 (X) == PHY_ID_BCM5780 || (X) == PHY_ID_BCM5787 || \ 2941 (X) == PHY_ID_BCM5780 || (X) == PHY_ID_BCM5787 || \
2922 (X) == PHY_ID_BCM5755 || (X) == PHY_ID_BCM5756 || \ 2942 (X) == PHY_ID_BCM5755 || (X) == PHY_ID_BCM5756 || \
2923 (X) == PHY_ID_BCM5906 || (X) == PHY_ID_BCM5761 || \ 2943 (X) == PHY_ID_BCM5906 || (X) == PHY_ID_BCM5761 || \
2924 (X) == PHY_ID_BCM5717 || (X) == PHY_ID_BCM8002) 2944 (X) == PHY_ID_BCM5718C || (X) == PHY_ID_BCM5718S || \
2945 (X) == PHY_ID_BCM57765 || (X) == PHY_ID_BCM8002)
2925 2946
2926 struct tg3_hw_stats *hw_stats; 2947 struct tg3_hw_stats *hw_stats;
2927 dma_addr_t stats_mapping; 2948 dma_addr_t stats_mapping;
diff --git a/drivers/net/tlan.c b/drivers/net/tlan.c
index fabaeffb315..3ec31dce99f 100644
--- a/drivers/net/tlan.c
+++ b/drivers/net/tlan.c
@@ -254,7 +254,7 @@ static struct board {
254 { "Compaq NetFlex-3/E", TLAN_ADAPTER_ACTIVITY_LED, 0x83 }, /* EISA card */ 254 { "Compaq NetFlex-3/E", TLAN_ADAPTER_ACTIVITY_LED, 0x83 }, /* EISA card */
255}; 255};
256 256
257static struct pci_device_id tlan_pci_tbl[] = { 257static DEFINE_PCI_DEVICE_TABLE(tlan_pci_tbl) = {
258 { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_NETEL10, 258 { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_NETEL10,
259 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, 259 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
260 { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_NETEL100, 260 { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_NETEL100,
@@ -338,7 +338,7 @@ static int TLan_PhyInternalService( struct net_device * );
338static int TLan_PhyDp83840aCheck( struct net_device * ); 338static int TLan_PhyDp83840aCheck( struct net_device * );
339*/ 339*/
340 340
341static int TLan_MiiReadReg( struct net_device *, u16, u16, u16 * ); 341static bool TLan_MiiReadReg( struct net_device *, u16, u16, u16 * );
342static void TLan_MiiSendData( u16, u32, unsigned ); 342static void TLan_MiiSendData( u16, u32, unsigned );
343static void TLan_MiiSync( u16 ); 343static void TLan_MiiSync( u16 );
344static void TLan_MiiWriteReg( struct net_device *, u16, u16, u16 ); 344static void TLan_MiiWriteReg( struct net_device *, u16, u16, u16 );
@@ -2204,7 +2204,7 @@ TLan_ResetAdapter( struct net_device *dev )
2204 u32 data; 2204 u32 data;
2205 u8 data8; 2205 u8 data8;
2206 2206
2207 priv->tlanFullDuplex = FALSE; 2207 priv->tlanFullDuplex = false;
2208 priv->phyOnline=0; 2208 priv->phyOnline=0;
2209 netif_carrier_off(dev); 2209 netif_carrier_off(dev);
2210 2210
@@ -2259,7 +2259,7 @@ TLan_ResetAdapter( struct net_device *dev )
2259 TLan_DioWrite8( dev->base_addr, TLAN_ACOMMIT, 0x0a ); 2259 TLan_DioWrite8( dev->base_addr, TLAN_ACOMMIT, 0x0a );
2260 } else if ( priv->duplex == TLAN_DUPLEX_FULL ) { 2260 } else if ( priv->duplex == TLAN_DUPLEX_FULL ) {
2261 TLan_DioWrite8( dev->base_addr, TLAN_ACOMMIT, 0x00 ); 2261 TLan_DioWrite8( dev->base_addr, TLAN_ACOMMIT, 0x00 );
2262 priv->tlanFullDuplex = TRUE; 2262 priv->tlanFullDuplex = true;
2263 } else { 2263 } else {
2264 TLan_DioWrite8( dev->base_addr, TLAN_ACOMMIT, 0x08 ); 2264 TLan_DioWrite8( dev->base_addr, TLAN_ACOMMIT, 0x08 );
2265 } 2265 }
@@ -2651,14 +2651,14 @@ static void TLan_PhyStartLink( struct net_device *dev )
2651 TLan_MiiWriteReg( dev, phy, MII_GEN_CTL, 0x0000); 2651 TLan_MiiWriteReg( dev, phy, MII_GEN_CTL, 0x0000);
2652 } else if ( priv->speed == TLAN_SPEED_10 && 2652 } else if ( priv->speed == TLAN_SPEED_10 &&
2653 priv->duplex == TLAN_DUPLEX_FULL) { 2653 priv->duplex == TLAN_DUPLEX_FULL) {
2654 priv->tlanFullDuplex = TRUE; 2654 priv->tlanFullDuplex = true;
2655 TLan_MiiWriteReg( dev, phy, MII_GEN_CTL, 0x0100); 2655 TLan_MiiWriteReg( dev, phy, MII_GEN_CTL, 0x0100);
2656 } else if ( priv->speed == TLAN_SPEED_100 && 2656 } else if ( priv->speed == TLAN_SPEED_100 &&
2657 priv->duplex == TLAN_DUPLEX_HALF) { 2657 priv->duplex == TLAN_DUPLEX_HALF) {
2658 TLan_MiiWriteReg( dev, phy, MII_GEN_CTL, 0x2000); 2658 TLan_MiiWriteReg( dev, phy, MII_GEN_CTL, 0x2000);
2659 } else if ( priv->speed == TLAN_SPEED_100 && 2659 } else if ( priv->speed == TLAN_SPEED_100 &&
2660 priv->duplex == TLAN_DUPLEX_FULL) { 2660 priv->duplex == TLAN_DUPLEX_FULL) {
2661 priv->tlanFullDuplex = TRUE; 2661 priv->tlanFullDuplex = true;
2662 TLan_MiiWriteReg( dev, phy, MII_GEN_CTL, 0x2100); 2662 TLan_MiiWriteReg( dev, phy, MII_GEN_CTL, 0x2100);
2663 } else { 2663 } else {
2664 2664
@@ -2695,7 +2695,7 @@ static void TLan_PhyStartLink( struct net_device *dev )
2695 tctl &= ~TLAN_TC_AUISEL; 2695 tctl &= ~TLAN_TC_AUISEL;
2696 if ( priv->duplex == TLAN_DUPLEX_FULL ) { 2696 if ( priv->duplex == TLAN_DUPLEX_FULL ) {
2697 control |= MII_GC_DUPLEX; 2697 control |= MII_GC_DUPLEX;
2698 priv->tlanFullDuplex = TRUE; 2698 priv->tlanFullDuplex = true;
2699 } 2699 }
2700 if ( priv->speed == TLAN_SPEED_100 ) { 2700 if ( priv->speed == TLAN_SPEED_100 ) {
2701 control |= MII_GC_SPEEDSEL; 2701 control |= MII_GC_SPEEDSEL;
@@ -2750,9 +2750,9 @@ static void TLan_PhyFinishAutoNeg( struct net_device *dev )
2750 TLan_MiiReadReg( dev, phy, MII_AN_LPA, &an_lpa ); 2750 TLan_MiiReadReg( dev, phy, MII_AN_LPA, &an_lpa );
2751 mode = an_adv & an_lpa & 0x03E0; 2751 mode = an_adv & an_lpa & 0x03E0;
2752 if ( mode & 0x0100 ) { 2752 if ( mode & 0x0100 ) {
2753 priv->tlanFullDuplex = TRUE; 2753 priv->tlanFullDuplex = true;
2754 } else if ( ! ( mode & 0x0080 ) && ( mode & 0x0040 ) ) { 2754 } else if ( ! ( mode & 0x0080 ) && ( mode & 0x0040 ) ) {
2755 priv->tlanFullDuplex = TRUE; 2755 priv->tlanFullDuplex = true;
2756 } 2756 }
2757 2757
2758 if ( ( ! ( mode & 0x0180 ) ) && 2758 if ( ( ! ( mode & 0x0180 ) ) &&
@@ -2855,8 +2855,8 @@ void TLan_PhyMonitor( struct net_device *dev )
2855 * TLan_MiiReadReg 2855 * TLan_MiiReadReg
2856 * 2856 *
2857 * Returns: 2857 * Returns:
2858 * 0 if ack received ok 2858 * false if ack received ok
2859 * 1 otherwise. 2859 * true if no ack received or other error
2860 * 2860 *
2861 * Parms: 2861 * Parms:
2862 * dev The device structure containing 2862 * dev The device structure containing
@@ -2875,17 +2875,17 @@ void TLan_PhyMonitor( struct net_device *dev )
2875 * 2875 *
2876 **************************************************************/ 2876 **************************************************************/
2877 2877
2878static int TLan_MiiReadReg( struct net_device *dev, u16 phy, u16 reg, u16 *val ) 2878static bool TLan_MiiReadReg( struct net_device *dev, u16 phy, u16 reg, u16 *val )
2879{ 2879{
2880 u8 nack; 2880 u8 nack;
2881 u16 sio, tmp; 2881 u16 sio, tmp;
2882 u32 i; 2882 u32 i;
2883 int err; 2883 bool err;
2884 int minten; 2884 int minten;
2885 TLanPrivateInfo *priv = netdev_priv(dev); 2885 TLanPrivateInfo *priv = netdev_priv(dev);
2886 unsigned long flags = 0; 2886 unsigned long flags = 0;
2887 2887
2888 err = FALSE; 2888 err = false;
2889 outw(TLAN_NET_SIO, dev->base_addr + TLAN_DIO_ADR); 2889 outw(TLAN_NET_SIO, dev->base_addr + TLAN_DIO_ADR);
2890 sio = dev->base_addr + TLAN_DIO_DATA + TLAN_NET_SIO; 2890 sio = dev->base_addr + TLAN_DIO_DATA + TLAN_NET_SIO;
2891 2891
@@ -2918,7 +2918,7 @@ static int TLan_MiiReadReg( struct net_device *dev, u16 phy, u16 reg, u16 *val )
2918 TLan_SetBit(TLAN_NET_SIO_MCLK, sio); 2918 TLan_SetBit(TLAN_NET_SIO_MCLK, sio);
2919 } 2919 }
2920 tmp = 0xffff; 2920 tmp = 0xffff;
2921 err = TRUE; 2921 err = true;
2922 } else { /* ACK, so read data */ 2922 } else { /* ACK, so read data */
2923 for (tmp = 0, i = 0x8000; i; i >>= 1) { 2923 for (tmp = 0, i = 0x8000; i; i >>= 1) {
2924 TLan_ClearBit(TLAN_NET_SIO_MCLK, sio); 2924 TLan_ClearBit(TLAN_NET_SIO_MCLK, sio);
diff --git a/drivers/net/tlan.h b/drivers/net/tlan.h
index 4b82f283e98..d13ff12d750 100644
--- a/drivers/net/tlan.h
+++ b/drivers/net/tlan.h
@@ -31,9 +31,6 @@
31 * 31 *
32 ****************************************************************/ 32 ****************************************************************/
33 33
34#define FALSE 0
35#define TRUE 1
36
37#define TLAN_MIN_FRAME_SIZE 64 34#define TLAN_MIN_FRAME_SIZE 64
38#define TLAN_MAX_FRAME_SIZE 1600 35#define TLAN_MAX_FRAME_SIZE 1600
39 36
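The tlan changes above remove the driver-private TRUE/FALSE macros and switch TLan_MiiReadReg and tlanFullDuplex to the kernel-wide bool type. As a rough illustration of the same idiom (bit masks taken from TLan_PhyFinishAutoNeg above, helper name invented):

#include <linux/types.h>	/* bool, true, false */

static bool example_negotiated_full_duplex(u16 mode)
{
	/* full duplex if 100TX-FD (0x0100), or 10T-FD (0x0040) without 100TX-HD (0x0080) */
	return (mode & 0x0100) || (!(mode & 0x0080) && (mode & 0x0040));
}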
diff --git a/drivers/net/tokenring/3c359.c b/drivers/net/tokenring/3c359.c
index cf552d1d962..b0d7db9d8bb 100644
--- a/drivers/net/tokenring/3c359.c
+++ b/drivers/net/tokenring/3c359.c
@@ -117,7 +117,7 @@ MODULE_PARM_DESC(message_level, "3c359: Level of reported messages") ;
117 * will be stuck with 1555 lines of hex #'s in the code. 117 * will be stuck with 1555 lines of hex #'s in the code.
118 */ 118 */
119 119
120static struct pci_device_id xl_pci_tbl[] = 120static DEFINE_PCI_DEVICE_TABLE(xl_pci_tbl) =
121{ 121{
122 {PCI_VENDOR_ID_3COM,PCI_DEVICE_ID_3COM_3C359, PCI_ANY_ID, PCI_ANY_ID, }, 122 {PCI_VENDOR_ID_3COM,PCI_DEVICE_ID_3COM_3C359, PCI_ANY_ID, PCI_ANY_ID, },
123 { } /* terminate list */ 123 { } /* terminate list */
diff --git a/drivers/net/tokenring/abyss.c b/drivers/net/tokenring/abyss.c
index b9db1b5a58a..515f122777a 100644
--- a/drivers/net/tokenring/abyss.c
+++ b/drivers/net/tokenring/abyss.c
@@ -45,7 +45,7 @@ static char version[] __devinitdata =
45 45
46#define ABYSS_IO_EXTENT 64 46#define ABYSS_IO_EXTENT 64
47 47
48static struct pci_device_id abyss_pci_tbl[] = { 48static DEFINE_PCI_DEVICE_TABLE(abyss_pci_tbl) = {
49 { PCI_VENDOR_ID_MADGE, PCI_DEVICE_ID_MADGE_MK2, 49 { PCI_VENDOR_ID_MADGE, PCI_DEVICE_ID_MADGE_MK2,
50 PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_NETWORK_TOKEN_RING << 8, 0x00ffffff, }, 50 PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_NETWORK_TOKEN_RING << 8, 0x00ffffff, },
51 { } /* Terminating entry */ 51 { } /* Terminating entry */
diff --git a/drivers/net/tokenring/lanstreamer.c b/drivers/net/tokenring/lanstreamer.c
index d6ccd59c7d0..3f9d5a25562 100644
--- a/drivers/net/tokenring/lanstreamer.c
+++ b/drivers/net/tokenring/lanstreamer.c
@@ -146,7 +146,7 @@
146static char version[] = "LanStreamer.c v0.4.0 03/08/01 - Mike Sullivan\n" 146static char version[] = "LanStreamer.c v0.4.0 03/08/01 - Mike Sullivan\n"
147 " v0.5.3 11/13/02 - Kent Yoder"; 147 " v0.5.3 11/13/02 - Kent Yoder";
148 148
149static struct pci_device_id streamer_pci_tbl[] = { 149static DEFINE_PCI_DEVICE_TABLE(streamer_pci_tbl) = {
150 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_TR, PCI_ANY_ID, PCI_ANY_ID,}, 150 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_TR, PCI_ANY_ID, PCI_ANY_ID,},
151 {} /* terminating entry */ 151 {} /* terminating entry */
152}; 152};
diff --git a/drivers/net/tokenring/olympic.c b/drivers/net/tokenring/olympic.c
index df32025c513..f010a4dc5f1 100644
--- a/drivers/net/tokenring/olympic.c
+++ b/drivers/net/tokenring/olympic.c
@@ -172,7 +172,7 @@ module_param_array(message_level, int, NULL, 0) ;
172static int network_monitor[OLYMPIC_MAX_ADAPTERS] = {0,}; 172static int network_monitor[OLYMPIC_MAX_ADAPTERS] = {0,};
173module_param_array(network_monitor, int, NULL, 0); 173module_param_array(network_monitor, int, NULL, 0);
174 174
175static struct pci_device_id olympic_pci_tbl[] = { 175static DEFINE_PCI_DEVICE_TABLE(olympic_pci_tbl) = {
176 {PCI_VENDOR_ID_IBM,PCI_DEVICE_ID_IBM_TR_WAKE,PCI_ANY_ID,PCI_ANY_ID,}, 176 {PCI_VENDOR_ID_IBM,PCI_DEVICE_ID_IBM_TR_WAKE,PCI_ANY_ID,PCI_ANY_ID,},
177 { } /* Terminating Entry */ 177 { } /* Terminating Entry */
178}; 178};
diff --git a/drivers/net/tokenring/tmspci.c b/drivers/net/tokenring/tmspci.c
index f92fe86fdca..d4c7c0c0a3d 100644
--- a/drivers/net/tokenring/tmspci.c
+++ b/drivers/net/tokenring/tmspci.c
@@ -57,7 +57,7 @@ static struct card_info card_info_table[] = {
57 { {0x03, 0x01}, "3Com Token Link Velocity"}, 57 { {0x03, 0x01}, "3Com Token Link Velocity"},
58}; 58};
59 59
60static struct pci_device_id tmspci_pci_tbl[] = { 60static DEFINE_PCI_DEVICE_TABLE(tmspci_pci_tbl) = {
61 { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_TOKENRING, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, 61 { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_TOKENRING, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
62 { PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_TR, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1 }, 62 { PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_TR, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1 },
63 { PCI_VENDOR_ID_TCONRAD, PCI_DEVICE_ID_TCONRAD_TOKENRING, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 2 }, 63 { PCI_VENDOR_ID_TCONRAD, PCI_DEVICE_ID_TCONRAD_TOKENRING, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 2 },
diff --git a/drivers/net/tulip/21142.c b/drivers/net/tulip/21142.c
index 9f6742fad6c..007d8e75666 100644
--- a/drivers/net/tulip/21142.c
+++ b/drivers/net/tulip/21142.c
@@ -43,8 +43,8 @@ void t21142_media_task(struct work_struct *work)
43 if ((csr14 & 0x80) && (csr12 & 0x7000) != 0x5000) 43 if ((csr14 & 0x80) && (csr12 & 0x7000) != 0x5000)
44 csr12 |= 6; 44 csr12 |= 6;
45 if (tulip_debug > 2) 45 if (tulip_debug > 2)
46 printk(KERN_INFO"%s: 21143 negotiation status %8.8x, %s.\n", 46 dev_info(&dev->dev, "21143 negotiation status %08x, %s\n",
47 dev->name, csr12, medianame[dev->if_port]); 47 csr12, medianame[dev->if_port]);
48 if (tulip_media_cap[dev->if_port] & MediaIsMII) { 48 if (tulip_media_cap[dev->if_port] & MediaIsMII) {
49 if (tulip_check_duplex(dev) < 0) { 49 if (tulip_check_duplex(dev) < 0) {
50 netif_carrier_off(dev); 50 netif_carrier_off(dev);
@@ -56,23 +56,26 @@ void t21142_media_task(struct work_struct *work)
56 } else if (tp->nwayset) { 56 } else if (tp->nwayset) {
57 /* Don't screw up a negotiated session! */ 57 /* Don't screw up a negotiated session! */
58 if (tulip_debug > 1) 58 if (tulip_debug > 1)
59 printk(KERN_INFO"%s: Using NWay-set %s media, csr12 %8.8x.\n", 59 dev_info(&dev->dev,
60 dev->name, medianame[dev->if_port], csr12); 60 "Using NWay-set %s media, csr12 %08x\n",
61 medianame[dev->if_port], csr12);
61 } else if (tp->medialock) { 62 } else if (tp->medialock) {
62 ; 63 ;
63 } else if (dev->if_port == 3) { 64 } else if (dev->if_port == 3) {
64 if (csr12 & 2) { /* No 100mbps link beat, revert to 10mbps. */ 65 if (csr12 & 2) { /* No 100mbps link beat, revert to 10mbps. */
65 if (tulip_debug > 1) 66 if (tulip_debug > 1)
66 printk(KERN_INFO"%s: No 21143 100baseTx link beat, %8.8x, " 67 dev_info(&dev->dev,
67 "trying NWay.\n", dev->name, csr12); 68 "No 21143 100baseTx link beat, %08x, trying NWay\n",
69 csr12);
68 t21142_start_nway(dev); 70 t21142_start_nway(dev);
69 next_tick = 3*HZ; 71 next_tick = 3*HZ;
70 } 72 }
71 } else if ((csr12 & 0x7000) != 0x5000) { 73 } else if ((csr12 & 0x7000) != 0x5000) {
72 /* Negotiation failed. Search media types. */ 74 /* Negotiation failed. Search media types. */
73 if (tulip_debug > 1) 75 if (tulip_debug > 1)
74 printk(KERN_INFO"%s: 21143 negotiation failed, status %8.8x.\n", 76 dev_info(&dev->dev,
75 dev->name, csr12); 77 "21143 negotiation failed, status %08x\n",
78 csr12);
76 if (!(csr12 & 4)) { /* 10mbps link beat good. */ 79 if (!(csr12 & 4)) { /* 10mbps link beat good. */
77 new_csr6 = 0x82420000; 80 new_csr6 = 0x82420000;
78 dev->if_port = 0; 81 dev->if_port = 0;
@@ -90,8 +93,8 @@ void t21142_media_task(struct work_struct *work)
90 iowrite32(1, ioaddr + CSR13); 93 iowrite32(1, ioaddr + CSR13);
91 } 94 }
92 if (tulip_debug > 1) 95 if (tulip_debug > 1)
93 printk(KERN_INFO"%s: Testing new 21143 media %s.\n", 96 dev_info(&dev->dev, "Testing new 21143 media %s\n",
94 dev->name, medianame[dev->if_port]); 97 medianame[dev->if_port]);
95 if (new_csr6 != (tp->csr6 & ~0x00D5)) { 98 if (new_csr6 != (tp->csr6 & ~0x00D5)) {
96 tp->csr6 &= 0x00D5; 99 tp->csr6 &= 0x00D5;
97 tp->csr6 |= new_csr6; 100 tp->csr6 |= new_csr6;
@@ -119,8 +122,8 @@ void t21142_start_nway(struct net_device *dev)
119 tp->nway = tp->mediasense = 1; 122 tp->nway = tp->mediasense = 1;
120 tp->nwayset = tp->lpar = 0; 123 tp->nwayset = tp->lpar = 0;
121 if (tulip_debug > 1) 124 if (tulip_debug > 1)
122 printk(KERN_DEBUG "%s: Restarting 21143 autonegotiation, csr14=%8.8x.\n", 125 printk(KERN_DEBUG "%s: Restarting 21143 autonegotiation, csr14=%08x\n",
123 dev->name, csr14); 126 dev->name, csr14);
124 iowrite32(0x0001, ioaddr + CSR13); 127 iowrite32(0x0001, ioaddr + CSR13);
125 udelay(100); 128 udelay(100);
126 iowrite32(csr14, ioaddr + CSR14); 129 iowrite32(csr14, ioaddr + CSR14);
@@ -147,8 +150,9 @@ void t21142_lnk_change(struct net_device *dev, int csr5)
147 if ((csr14 & 0x80) && (csr12 & 0x7000) != 0x5000) 150 if ((csr14 & 0x80) && (csr12 & 0x7000) != 0x5000)
148 csr12 |= 6; 151 csr12 |= 6;
149 if (tulip_debug > 1) 152 if (tulip_debug > 1)
150 printk(KERN_INFO"%s: 21143 link status interrupt %8.8x, CSR5 %x, " 153 dev_info(&dev->dev,
151 "%8.8x.\n", dev->name, csr12, csr5, csr14); 154 "21143 link status interrupt %08x, CSR5 %x, %08x\n",
155 csr12, csr5, csr14);
152 156
153 /* If NWay finished and we have a negotiated partner capability. */ 157 /* If NWay finished and we have a negotiated partner capability. */
154 if (tp->nway && !tp->nwayset && (csr12 & 0x7000) == 0x5000) { 158 if (tp->nway && !tp->nwayset && (csr12 & 0x7000) == 0x5000) {
@@ -171,14 +175,15 @@ void t21142_lnk_change(struct net_device *dev, int csr5)
171 175
172 if (tulip_debug > 1) { 176 if (tulip_debug > 1) {
173 if (tp->nwayset) 177 if (tp->nwayset)
174 printk(KERN_INFO "%s: Switching to %s based on link " 178 dev_info(&dev->dev,
175 "negotiation %4.4x & %4.4x = %4.4x.\n", 179 "Switching to %s based on link negotiation %04x & %04x = %04x\n",
176 dev->name, medianame[dev->if_port], tp->sym_advertise, 180 medianame[dev->if_port],
177 tp->lpar, negotiated); 181 tp->sym_advertise, tp->lpar,
182 negotiated);
178 else 183 else
179 printk(KERN_INFO "%s: Autonegotiation failed, using %s," 184 dev_info(&dev->dev,
180 " link beat status %4.4x.\n", 185 "Autonegotiation failed, using %s, link beat status %04x\n",
181 dev->name, medianame[dev->if_port], csr12); 186 medianame[dev->if_port], csr12);
182 } 187 }
183 188
184 if (tp->mtable) { 189 if (tp->mtable) {
@@ -201,14 +206,14 @@ void t21142_lnk_change(struct net_device *dev, int csr5)
201#if 0 /* Restart shouldn't be needed. */ 206#if 0 /* Restart shouldn't be needed. */
202 iowrite32(tp->csr6 | RxOn, ioaddr + CSR6); 207 iowrite32(tp->csr6 | RxOn, ioaddr + CSR6);
203 if (tulip_debug > 2) 208 if (tulip_debug > 2)
204 printk(KERN_DEBUG "%s: Restarting Tx and Rx, CSR5 is %8.8x.\n", 209 printk(KERN_DEBUG "%s: Restarting Tx and Rx, CSR5 is %08x\n",
205 dev->name, ioread32(ioaddr + CSR5)); 210 dev->name, ioread32(ioaddr + CSR5));
206#endif 211#endif
207 tulip_start_rxtx(tp); 212 tulip_start_rxtx(tp);
208 if (tulip_debug > 2) 213 if (tulip_debug > 2)
209 printk(KERN_DEBUG "%s: Setting CSR6 %8.8x/%x CSR12 %8.8x.\n", 214 printk(KERN_DEBUG "%s: Setting CSR6 %08x/%x CSR12 %08x\n",
210 dev->name, tp->csr6, ioread32(ioaddr + CSR6), 215 dev->name, tp->csr6, ioread32(ioaddr + CSR6),
211 ioread32(ioaddr + CSR12)); 216 ioread32(ioaddr + CSR12));
212 } else if ((tp->nwayset && (csr5 & 0x08000000) && 217 } else if ((tp->nwayset && (csr5 & 0x08000000) &&
213 (dev->if_port == 3 || dev->if_port == 5) && 218 (dev->if_port == 3 || dev->if_port == 5) &&
214 (csr12 & 2) == 2) || 219 (csr12 & 2) == 2) ||
@@ -220,9 +225,9 @@ void t21142_lnk_change(struct net_device *dev, int csr5)
220 add_timer(&tp->timer); 225 add_timer(&tp->timer);
221 } else if (dev->if_port == 3 || dev->if_port == 5) { 226 } else if (dev->if_port == 3 || dev->if_port == 5) {
222 if (tulip_debug > 1) 227 if (tulip_debug > 1)
223 printk(KERN_INFO"%s: 21143 %s link beat %s.\n", 228 dev_info(&dev->dev, "21143 %s link beat %s\n",
224 dev->name, medianame[dev->if_port], 229 medianame[dev->if_port],
225 (csr12 & 2) ? "failed" : "good"); 230 (csr12 & 2) ? "failed" : "good");
226 if ((csr12 & 2) && ! tp->medialock) { 231 if ((csr12 & 2) && ! tp->medialock) {
227 del_timer_sync(&tp->timer); 232 del_timer_sync(&tp->timer);
228 t21142_start_nway(dev); 233 t21142_start_nway(dev);
@@ -232,21 +237,18 @@ void t21142_lnk_change(struct net_device *dev, int csr5)
232 iowrite32(csr14 & ~0x080, ioaddr + CSR14); 237 iowrite32(csr14 & ~0x080, ioaddr + CSR14);
233 } else if (dev->if_port == 0 || dev->if_port == 4) { 238 } else if (dev->if_port == 0 || dev->if_port == 4) {
234 if ((csr12 & 4) == 0) 239 if ((csr12 & 4) == 0)
235 printk(KERN_INFO"%s: 21143 10baseT link beat good.\n", 240 dev_info(&dev->dev, "21143 10baseT link beat good\n");
236 dev->name);
237 } else if (!(csr12 & 4)) { /* 10mbps link beat good. */ 241 } else if (!(csr12 & 4)) { /* 10mbps link beat good. */
238 if (tulip_debug) 242 if (tulip_debug)
239 printk(KERN_INFO"%s: 21143 10mbps sensed media.\n", 243 dev_info(&dev->dev, "21143 10mbps sensed media\n");
240 dev->name);
241 dev->if_port = 0; 244 dev->if_port = 0;
242 } else if (tp->nwayset) { 245 } else if (tp->nwayset) {
243 if (tulip_debug) 246 if (tulip_debug)
244 printk(KERN_INFO"%s: 21143 using NWay-set %s, csr6 %8.8x.\n", 247 dev_info(&dev->dev, "21143 using NWay-set %s, csr6 %08x\n",
245 dev->name, medianame[dev->if_port], tp->csr6); 248 medianame[dev->if_port], tp->csr6);
246 } else { /* 100mbps link beat good. */ 249 } else { /* 100mbps link beat good. */
247 if (tulip_debug) 250 if (tulip_debug)
248 printk(KERN_INFO"%s: 21143 100baseTx sensed media.\n", 251 dev_info(&dev->dev, "21143 100baseTx sensed media\n");
249 dev->name);
250 dev->if_port = 3; 252 dev->if_port = 3;
251 tp->csr6 = 0x838E0000 | (tp->csr6 & 0x20ff); 253 tp->csr6 = 0x838E0000 | (tp->csr6 & 0x20ff);
252 iowrite32(0x0003FF7F, ioaddr + CSR14); 254 iowrite32(0x0003FF7F, ioaddr + CSR14);
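The tulip hunks above, and several that follow, convert printk(KERN_INFO "%s: ...", dev->name, ...) calls to dev_info(&dev->dev, ...), letting the driver core generate the device prefix instead of each call site formatting it by hand. A small sketch of the two styles side by side, reusing a message from the diff (the wrapper function is invented):

#include <linux/kernel.h>
#include <linux/netdevice.h>

static void example_report_link_beat(struct net_device *dev)
{
	/* old style: the call site formats the interface name itself */
	printk(KERN_INFO "%s: 21143 10baseT link beat good\n", dev->name);

	/* new style: the driver core derives the prefix from dev->dev */
	dev_info(&dev->dev, "21143 10baseT link beat good\n");
}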
diff --git a/drivers/net/tulip/de2104x.c b/drivers/net/tulip/de2104x.c
index d4255d44cb7..29330209ad8 100644
--- a/drivers/net/tulip/de2104x.c
+++ b/drivers/net/tulip/de2104x.c
@@ -337,7 +337,7 @@ static void de21041_media_timer (unsigned long data);
337static unsigned int de_ok_to_advertise (struct de_private *de, u32 new_media); 337static unsigned int de_ok_to_advertise (struct de_private *de, u32 new_media);
338 338
339 339
340static struct pci_device_id de_pci_tbl[] = { 340static DEFINE_PCI_DEVICE_TABLE(de_pci_tbl) = {
341 { PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_TULIP, 341 { PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_TULIP,
342 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, 342 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
343 { PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_TULIP_PLUS, 343 { PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_TULIP_PLUS,
@@ -382,9 +382,9 @@ static void de_rx_err_acct (struct de_private *de, unsigned rx_tail,
382 /* Ignore earlier buffers. */ 382 /* Ignore earlier buffers. */
383 if ((status & 0xffff) != 0x7fff) { 383 if ((status & 0xffff) != 0x7fff) {
384 if (netif_msg_rx_err(de)) 384 if (netif_msg_rx_err(de))
385 printk(KERN_WARNING "%s: Oversized Ethernet frame " 385 dev_warn(&de->dev->dev,
386 "spanned multiple buffers, status %8.8x!\n", 386 "Oversized Ethernet frame spanned multiple buffers, status %08x!\n",
387 de->dev->name, status); 387 status);
388 de->net_stats.rx_length_errors++; 388 de->net_stats.rx_length_errors++;
389 } 389 }
390 } else if (status & RxError) { 390 } else if (status & RxError) {
@@ -487,7 +487,7 @@ rx_next:
487 } 487 }
488 488
489 if (!rx_work) 489 if (!rx_work)
490 printk(KERN_WARNING "%s: rx work limit reached\n", de->dev->name); 490 dev_warn(&de->dev->dev, "rx work limit reached\n");
491 491
492 de->rx_tail = rx_tail; 492 de->rx_tail = rx_tail;
493} 493}
@@ -504,7 +504,8 @@ static irqreturn_t de_interrupt (int irq, void *dev_instance)
504 504
505 if (netif_msg_intr(de)) 505 if (netif_msg_intr(de))
506 printk(KERN_DEBUG "%s: intr, status %08x mode %08x desc %u/%u/%u\n", 506 printk(KERN_DEBUG "%s: intr, status %08x mode %08x desc %u/%u/%u\n",
507 dev->name, status, dr32(MacMode), de->rx_tail, de->tx_head, de->tx_tail); 507 dev->name, status, dr32(MacMode),
508 de->rx_tail, de->tx_head, de->tx_tail);
508 509
509 dw32(MacStatus, status); 510 dw32(MacStatus, status);
510 511
@@ -529,8 +530,9 @@ static irqreturn_t de_interrupt (int irq, void *dev_instance)
529 530
530 pci_read_config_word(de->pdev, PCI_STATUS, &pci_status); 531 pci_read_config_word(de->pdev, PCI_STATUS, &pci_status);
531 pci_write_config_word(de->pdev, PCI_STATUS, pci_status); 532 pci_write_config_word(de->pdev, PCI_STATUS, pci_status);
532 printk(KERN_ERR "%s: PCI bus error, status=%08x, PCI status=%04x\n", 533 dev_err(&de->dev->dev,
533 dev->name, status, pci_status); 534 "PCI bus error, status=%08x, PCI status=%04x\n",
535 status, pci_status);
534 } 536 }
535 537
536 return IRQ_HANDLED; 538 return IRQ_HANDLED;
@@ -582,7 +584,8 @@ static void de_tx (struct de_private *de)
582 de->net_stats.tx_packets++; 584 de->net_stats.tx_packets++;
583 de->net_stats.tx_bytes += skb->len; 585 de->net_stats.tx_bytes += skb->len;
584 if (netif_msg_tx_done(de)) 586 if (netif_msg_tx_done(de))
585 printk(KERN_DEBUG "%s: tx done, slot %d\n", de->dev->name, tx_tail); 587 printk(KERN_DEBUG "%s: tx done, slot %d\n",
588 de->dev->name, tx_tail);
586 } 589 }
587 dev_kfree_skb_irq(skb); 590 dev_kfree_skb_irq(skb);
588 } 591 }
@@ -870,7 +873,7 @@ static void de_stop_rxtx (struct de_private *de)
870 udelay(100); 873 udelay(100);
871 } 874 }
872 875
873 printk(KERN_WARNING "%s: timeout expired stopping DMA\n", de->dev->name); 876 dev_warn(&de->dev->dev, "timeout expired stopping DMA\n");
874} 877}
875 878
876static inline void de_start_rxtx (struct de_private *de) 879static inline void de_start_rxtx (struct de_private *de)
@@ -905,8 +908,8 @@ static void de_link_up(struct de_private *de)
905 if (!netif_carrier_ok(de->dev)) { 908 if (!netif_carrier_ok(de->dev)) {
906 netif_carrier_on(de->dev); 909 netif_carrier_on(de->dev);
907 if (netif_msg_link(de)) 910 if (netif_msg_link(de))
908 printk(KERN_INFO "%s: link up, media %s\n", 911 dev_info(&de->dev->dev, "link up, media %s\n",
909 de->dev->name, media_name[de->media_type]); 912 media_name[de->media_type]);
910 } 913 }
911} 914}
912 915
@@ -915,7 +918,7 @@ static void de_link_down(struct de_private *de)
915 if (netif_carrier_ok(de->dev)) { 918 if (netif_carrier_ok(de->dev)) {
916 netif_carrier_off(de->dev); 919 netif_carrier_off(de->dev);
917 if (netif_msg_link(de)) 920 if (netif_msg_link(de))
918 printk(KERN_INFO "%s: link down\n", de->dev->name); 921 dev_info(&de->dev->dev, "link down\n");
919 } 922 }
920} 923}
921 924
@@ -925,7 +928,8 @@ static void de_set_media (struct de_private *de)
925 u32 macmode = dr32(MacMode); 928 u32 macmode = dr32(MacMode);
926 929
927 if (de_is_running(de)) 930 if (de_is_running(de))
928 printk(KERN_WARNING "%s: chip is running while changing media!\n", de->dev->name); 931 dev_warn(&de->dev->dev,
932 "chip is running while changing media!\n");
929 933
930 if (de->de21040) 934 if (de->de21040)
931 dw32(CSR11, FULL_DUPLEX_MAGIC); 935 dw32(CSR11, FULL_DUPLEX_MAGIC);
@@ -945,15 +949,15 @@ static void de_set_media (struct de_private *de)
945 macmode &= ~FullDuplex; 949 macmode &= ~FullDuplex;
946 950
947 if (netif_msg_link(de)) { 951 if (netif_msg_link(de)) {
948 printk(KERN_INFO 952 dev_info(&de->dev->dev, "set link %s\n", media_name[media]);
949 "%s: set link %s\n" 953 dev_info(&de->dev->dev, "mode 0x%x, sia 0x%x,0x%x,0x%x,0x%x\n",
950 "%s: mode 0x%x, sia 0x%x,0x%x,0x%x,0x%x\n" 954 dr32(MacMode), dr32(SIAStatus),
951 "%s: set mode 0x%x, set sia 0x%x,0x%x,0x%x\n", 955 dr32(CSR13), dr32(CSR14), dr32(CSR15));
952 de->dev->name, media_name[media], 956
953 de->dev->name, dr32(MacMode), dr32(SIAStatus), 957 dev_info(&de->dev->dev,
954 dr32(CSR13), dr32(CSR14), dr32(CSR15), 958 "set mode 0x%x, set sia 0x%x,0x%x,0x%x\n",
955 de->dev->name, macmode, de->media[media].csr13, 959 macmode, de->media[media].csr13,
956 de->media[media].csr14, de->media[media].csr15); 960 de->media[media].csr14, de->media[media].csr15);
957 } 961 }
958 if (macmode != dr32(MacMode)) 962 if (macmode != dr32(MacMode))
959 dw32(MacMode, macmode); 963 dw32(MacMode, macmode);
@@ -992,9 +996,8 @@ static void de21040_media_timer (unsigned long data)
992 de_link_up(de); 996 de_link_up(de);
993 else 997 else
994 if (netif_msg_timer(de)) 998 if (netif_msg_timer(de))
995 printk(KERN_INFO "%s: %s link ok, status %x\n", 999 dev_info(&dev->dev, "%s link ok, status %x\n",
996 dev->name, media_name[de->media_type], 1000 media_name[de->media_type], status);
997 status);
998 return; 1001 return;
999 } 1002 }
1000 1003
@@ -1022,8 +1025,8 @@ no_link_yet:
1022 add_timer(&de->media_timer); 1025 add_timer(&de->media_timer);
1023 1026
1024 if (netif_msg_timer(de)) 1027 if (netif_msg_timer(de))
1025 printk(KERN_INFO "%s: no link, trying media %s, status %x\n", 1028 dev_info(&dev->dev, "no link, trying media %s, status %x\n",
1026 dev->name, media_name[de->media_type], status); 1029 media_name[de->media_type], status);
1027} 1030}
1028 1031
1029static unsigned int de_ok_to_advertise (struct de_private *de, u32 new_media) 1032static unsigned int de_ok_to_advertise (struct de_private *de, u32 new_media)
@@ -1079,9 +1082,10 @@ static void de21041_media_timer (unsigned long data)
1079 de_link_up(de); 1082 de_link_up(de);
1080 else 1083 else
1081 if (netif_msg_timer(de)) 1084 if (netif_msg_timer(de))
1082 printk(KERN_INFO "%s: %s link ok, mode %x status %x\n", 1085 dev_info(&dev->dev,
1083 dev->name, media_name[de->media_type], 1086 "%s link ok, mode %x status %x\n",
1084 dr32(MacMode), status); 1087 media_name[de->media_type],
1088 dr32(MacMode), status);
1085 return; 1089 return;
1086 } 1090 }
1087 1091
@@ -1150,8 +1154,8 @@ no_link_yet:
1150 add_timer(&de->media_timer); 1154 add_timer(&de->media_timer);
1151 1155
1152 if (netif_msg_timer(de)) 1156 if (netif_msg_timer(de))
1153 printk(KERN_INFO "%s: no link, trying media %s, status %x\n", 1157 dev_info(&dev->dev, "no link, trying media %s, status %x\n",
1154 dev->name, media_name[de->media_type], status); 1158 media_name[de->media_type], status);
1155} 1159}
1156 1160
1157static void de_media_interrupt (struct de_private *de, u32 status) 1161static void de_media_interrupt (struct de_private *de, u32 status)
@@ -1378,8 +1382,7 @@ static int de_open (struct net_device *dev)
1378 1382
1379 rc = de_alloc_rings(de); 1383 rc = de_alloc_rings(de);
1380 if (rc) { 1384 if (rc) {
1381 printk(KERN_ERR "%s: ring allocation failure, err=%d\n", 1385 dev_err(&dev->dev, "ring allocation failure, err=%d\n", rc);
1382 dev->name, rc);
1383 return rc; 1386 return rc;
1384 } 1387 }
1385 1388
@@ -1387,15 +1390,14 @@ static int de_open (struct net_device *dev)
1387 1390
1388 rc = request_irq(dev->irq, de_interrupt, IRQF_SHARED, dev->name, dev); 1391 rc = request_irq(dev->irq, de_interrupt, IRQF_SHARED, dev->name, dev);
1389 if (rc) { 1392 if (rc) {
1390 printk(KERN_ERR "%s: IRQ %d request failure, err=%d\n", 1393 dev_err(&dev->dev, "IRQ %d request failure, err=%d\n",
1391 dev->name, dev->irq, rc); 1394 dev->irq, rc);
1392 goto err_out_free; 1395 goto err_out_free;
1393 } 1396 }
1394 1397
1395 rc = de_init_hw(de); 1398 rc = de_init_hw(de);
1396 if (rc) { 1399 if (rc) {
1397 printk(KERN_ERR "%s: h/w init failure, err=%d\n", 1400 dev_err(&dev->dev, "h/w init failure, err=%d\n", rc);
1398 dev->name, rc);
1399 goto err_out_free_irq; 1401 goto err_out_free_irq;
1400 } 1402 }
1401 1403
@@ -1666,8 +1668,8 @@ static int de_nway_reset(struct net_device *dev)
1666 status = dr32(SIAStatus); 1668 status = dr32(SIAStatus);
1667 dw32(SIAStatus, (status & ~NWayState) | NWayRestart); 1669 dw32(SIAStatus, (status & ~NWayState) | NWayRestart);
1668 if (netif_msg_link(de)) 1670 if (netif_msg_link(de))
1669 printk(KERN_INFO "%s: link nway restart, status %x,%x\n", 1671 dev_info(&de->dev->dev, "link nway restart, status %x,%x\n",
1670 de->dev->name, status, dr32(SIAStatus)); 1672 status, dr32(SIAStatus));
1671 return 0; 1673 return 0;
1672} 1674}
1673 1675
@@ -1711,7 +1713,7 @@ static void __devinit de21040_get_mac_address (struct de_private *de)
1711 de->dev->dev_addr[i] = value; 1713 de->dev->dev_addr[i] = value;
1712 udelay(1); 1714 udelay(1);
1713 if (boguscnt <= 0) 1715 if (boguscnt <= 0)
1714 printk(KERN_WARNING PFX "timeout reading 21040 MAC address byte %u\n", i); 1716 pr_warning(PFX "timeout reading 21040 MAC address byte %u\n", i);
1715 } 1717 }
1716} 1718}
1717 1719
@@ -1830,9 +1832,8 @@ static void __devinit de21041_get_srom_info (struct de_private *de)
1830 } 1832 }
1831 1833
1832 if (netif_msg_probe(de)) 1834 if (netif_msg_probe(de))
1833 printk(KERN_INFO "de%d: SROM leaf offset %u, default media %s\n", 1835 pr_info("de%d: SROM leaf offset %u, default media %s\n",
1834 de->board_idx, ofs, 1836 de->board_idx, ofs, media_name[de->media_type]);
1835 media_name[de->media_type]);
1836 1837
1837 /* init SIA register values to defaults */ 1838 /* init SIA register values to defaults */
1838 for (i = 0; i < DE_MAX_MEDIA; i++) { 1839 for (i = 0; i < DE_MAX_MEDIA; i++) {
@@ -1879,9 +1880,9 @@ static void __devinit de21041_get_srom_info (struct de_private *de)
1879 de->media[idx].type = idx; 1880 de->media[idx].type = idx;
1880 1881
1881 if (netif_msg_probe(de)) 1882 if (netif_msg_probe(de))
1882 printk(KERN_INFO "de%d: media block #%u: %s", 1883 pr_info("de%d: media block #%u: %s",
1883 de->board_idx, i, 1884 de->board_idx, i,
1884 media_name[de->media[idx].type]); 1885 media_name[de->media[idx].type]);
1885 1886
1886 bufp += sizeof (ib->opts); 1887 bufp += sizeof (ib->opts);
1887 1888
@@ -1893,13 +1894,13 @@ static void __devinit de21041_get_srom_info (struct de_private *de)
1893 sizeof(ib->csr15); 1894 sizeof(ib->csr15);
1894 1895
1895 if (netif_msg_probe(de)) 1896 if (netif_msg_probe(de))
1896 printk(" (%x,%x,%x)\n", 1897 pr_cont(" (%x,%x,%x)\n",
1897 de->media[idx].csr13, 1898 de->media[idx].csr13,
1898 de->media[idx].csr14, 1899 de->media[idx].csr14,
1899 de->media[idx].csr15); 1900 de->media[idx].csr15);
1900 1901
1901 } else if (netif_msg_probe(de)) 1902 } else if (netif_msg_probe(de))
1902 printk("\n"); 1903 pr_cont("\n");
1903 1904
1904 if (bufp > ((void *)&ee_data[DE_EEPROM_SIZE - 3])) 1905 if (bufp > ((void *)&ee_data[DE_EEPROM_SIZE - 3]))
1905 break; 1906 break;
@@ -2005,7 +2006,7 @@ static int __devinit de_init_one (struct pci_dev *pdev,
2005 /* check for invalid IRQ value */ 2006 /* check for invalid IRQ value */
2006 if (pdev->irq < 2) { 2007 if (pdev->irq < 2) {
2007 rc = -EIO; 2008 rc = -EIO;
2008 printk(KERN_ERR PFX "invalid irq (%d) for pci dev %s\n", 2009 pr_err(PFX "invalid irq (%d) for pci dev %s\n",
2009 pdev->irq, pci_name(pdev)); 2010 pdev->irq, pci_name(pdev));
2010 goto err_out_res; 2011 goto err_out_res;
2011 } 2012 }
@@ -2016,14 +2017,14 @@ static int __devinit de_init_one (struct pci_dev *pdev,
2016 pciaddr = pci_resource_start(pdev, 1); 2017 pciaddr = pci_resource_start(pdev, 1);
2017 if (!pciaddr) { 2018 if (!pciaddr) {
2018 rc = -EIO; 2019 rc = -EIO;
2019 printk(KERN_ERR PFX "no MMIO resource for pci dev %s\n", 2020 pr_err(PFX "no MMIO resource for pci dev %s\n", pci_name(pdev));
2020 pci_name(pdev));
2021 goto err_out_res; 2021 goto err_out_res;
2022 } 2022 }
2023 if (pci_resource_len(pdev, 1) < DE_REGS_SIZE) { 2023 if (pci_resource_len(pdev, 1) < DE_REGS_SIZE) {
2024 rc = -EIO; 2024 rc = -EIO;
2025 printk(KERN_ERR PFX "MMIO resource (%llx) too small on pci dev %s\n", 2025 pr_err(PFX "MMIO resource (%llx) too small on pci dev %s\n",
2026 (unsigned long long)pci_resource_len(pdev, 1), pci_name(pdev)); 2026 (unsigned long long)pci_resource_len(pdev, 1),
2027 pci_name(pdev));
2027 goto err_out_res; 2028 goto err_out_res;
2028 } 2029 }
2029 2030
@@ -2031,9 +2032,9 @@ static int __devinit de_init_one (struct pci_dev *pdev,
2031 regs = ioremap_nocache(pciaddr, DE_REGS_SIZE); 2032 regs = ioremap_nocache(pciaddr, DE_REGS_SIZE);
2032 if (!regs) { 2033 if (!regs) {
2033 rc = -EIO; 2034 rc = -EIO;
2034 printk(KERN_ERR PFX "Cannot map PCI MMIO (%llx@%lx) on pci dev %s\n", 2035 pr_err(PFX "Cannot map PCI MMIO (%llx@%lx) on pci dev %s\n",
2035 (unsigned long long)pci_resource_len(pdev, 1), 2036 (unsigned long long)pci_resource_len(pdev, 1),
2036 pciaddr, pci_name(pdev)); 2037 pciaddr, pci_name(pdev));
2037 goto err_out_res; 2038 goto err_out_res;
2038 } 2039 }
2039 dev->base_addr = (unsigned long) regs; 2040 dev->base_addr = (unsigned long) regs;
@@ -2044,8 +2045,7 @@ static int __devinit de_init_one (struct pci_dev *pdev,
2044 /* make sure hardware is not running */ 2045 /* make sure hardware is not running */
2045 rc = de_reset_mac(de); 2046 rc = de_reset_mac(de);
2046 if (rc) { 2047 if (rc) {
2047 printk(KERN_ERR PFX "Cannot reset MAC, pci dev %s\n", 2048 pr_err(PFX "Cannot reset MAC, pci dev %s\n", pci_name(pdev));
2048 pci_name(pdev));
2049 goto err_out_iomap; 2049 goto err_out_iomap;
2050 } 2050 }
2051 2051
@@ -2065,12 +2065,11 @@ static int __devinit de_init_one (struct pci_dev *pdev,
2065 goto err_out_iomap; 2065 goto err_out_iomap;
2066 2066
2067 /* print info about board and interface just registered */ 2067 /* print info about board and interface just registered */
2068 printk (KERN_INFO "%s: %s at 0x%lx, %pM, IRQ %d\n", 2068 dev_info(&dev->dev, "%s at 0x%lx, %pM, IRQ %d\n",
2069 dev->name, 2069 de->de21040 ? "21040" : "21041",
2070 de->de21040 ? "21040" : "21041", 2070 dev->base_addr,
2071 dev->base_addr, 2071 dev->dev_addr,
2072 dev->dev_addr, 2072 dev->irq);
2073 dev->irq);
2074 2073
2075 pci_set_drvdata(pdev, dev); 2074 pci_set_drvdata(pdev, dev);
2076 2075
@@ -2158,8 +2157,7 @@ static int de_resume (struct pci_dev *pdev)
2158 if (!netif_running(dev)) 2157 if (!netif_running(dev))
2159 goto out_attach; 2158 goto out_attach;
2160 if ((retval = pci_enable_device(pdev))) { 2159 if ((retval = pci_enable_device(pdev))) {
2161 printk (KERN_ERR "%s: pci_enable_device failed in resume\n", 2160 dev_err(&dev->dev, "pci_enable_device failed in resume\n");
2162 dev->name);
2163 goto out; 2161 goto out;
2164 } 2162 }
2165 de_init_hw(de); 2163 de_init_hw(de);
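
The de2104x.c hunks above all apply the same conversion: messages that previously built their prefix by hand with printk(KERN_xxx "%s: ...", dev->name, ...) now go through the dev_info()/dev_warn()/dev_err() helpers, which take the struct device embedded in the net_device and derive the prefix from it, making the severity explicit at the call site. A minimal sketch of the two styles side by side (my_show_link is an illustrative name, not something from this patch):

	#include <linux/kernel.h>
	#include <linux/netdevice.h>

	/* Illustrative helper (not part of the patch): log link state both ways. */
	static void my_show_link(struct net_device *dev, bool up)
	{
		/* Old style: the caller pastes the interface name into every message. */
		printk(KERN_INFO "%s: link %s\n", dev->name, up ? "up" : "down");

		/* New style: dev_info() prefixes the message with the device name
		 * taken from the struct device embedded in the net_device, so the
		 * format string drops the leading "%s: " and the dev->name argument. */
		dev_info(&dev->dev, "link %s\n", up ? "up" : "down");
	}
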
diff --git a/drivers/net/tulip/dmfe.c b/drivers/net/tulip/dmfe.c
index 6f44ebf5891..5fc61c1012e 100644
--- a/drivers/net/tulip/dmfe.c
+++ b/drivers/net/tulip/dmfe.c
@@ -61,6 +61,8 @@
61 Test and make sure PCI latency is now correct for all cases. 61 Test and make sure PCI latency is now correct for all cases.
62*/ 62*/
63 63
64#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
65
64#define DRV_NAME "dmfe" 66#define DRV_NAME "dmfe"
65#define DRV_VERSION "1.36.4" 67#define DRV_VERSION "1.36.4"
66#define DRV_RELDATE "2002-01-17" 68#define DRV_RELDATE "2002-01-17"
@@ -149,16 +151,17 @@
149#define DMFE_TX_TIMEOUT ((3*HZ)/2) /* tx packet time-out time 1.5 s" */ 151#define DMFE_TX_TIMEOUT ((3*HZ)/2) /* tx packet time-out time 1.5 s" */
150#define DMFE_TX_KICK (HZ/2) /* tx packet Kick-out time 0.5 s" */ 152#define DMFE_TX_KICK (HZ/2) /* tx packet Kick-out time 0.5 s" */
151 153
152#define DMFE_DBUG(dbug_now, msg, value) \ 154#define DMFE_DBUG(dbug_now, msg, value) \
153 do { \ 155 do { \
154 if (dmfe_debug || (dbug_now)) \ 156 if (dmfe_debug || (dbug_now)) \
155 printk(KERN_ERR DRV_NAME ": %s %lx\n",\ 157 pr_err("%s %lx\n", \
156 (msg), (long) (value)); \ 158 (msg), (long) (value)); \
157 } while (0) 159 } while (0)
158 160
159#define SHOW_MEDIA_TYPE(mode) \ 161#define SHOW_MEDIA_TYPE(mode) \
160 printk (KERN_INFO DRV_NAME ": Change Speed to %sMhz %s duplex\n" , \ 162 pr_info("Change Speed to %sMhz %s duplex\n" , \
161 (mode & 1) ? "100":"10", (mode & 4) ? "full":"half"); 163 (mode & 1) ? "100":"10", \
164 (mode & 4) ? "full":"half");
162 165
163 166
164/* CR9 definition: SROM/MII */ 167/* CR9 definition: SROM/MII */
@@ -391,8 +394,7 @@ static int __devinit dmfe_init_one (struct pci_dev *pdev,
391 struct device_node *dp = pci_device_to_OF_node(pdev); 394 struct device_node *dp = pci_device_to_OF_node(pdev);
392 395
393 if (dp && of_get_property(dp, "local-mac-address", NULL)) { 396 if (dp && of_get_property(dp, "local-mac-address", NULL)) {
394 printk(KERN_INFO DRV_NAME 397 pr_info("skipping on-board DM910x (use tulip)\n");
395 ": skipping on-board DM910x (use tulip)\n");
396 return -ENODEV; 398 return -ENODEV;
397 } 399 }
398 } 400 }
@@ -405,8 +407,7 @@ static int __devinit dmfe_init_one (struct pci_dev *pdev,
405 SET_NETDEV_DEV(dev, &pdev->dev); 407 SET_NETDEV_DEV(dev, &pdev->dev);
406 408
407 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) { 409 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) {
408 printk(KERN_WARNING DRV_NAME 410 pr_warning("32-bit PCI DMA not available\n");
409 ": 32-bit PCI DMA not available.\n");
410 err = -ENODEV; 411 err = -ENODEV;
411 goto err_out_free; 412 goto err_out_free;
412 } 413 }
@@ -417,13 +418,13 @@ static int __devinit dmfe_init_one (struct pci_dev *pdev,
417 goto err_out_free; 418 goto err_out_free;
418 419
419 if (!pci_resource_start(pdev, 0)) { 420 if (!pci_resource_start(pdev, 0)) {
420 printk(KERN_ERR DRV_NAME ": I/O base is zero\n"); 421 pr_err("I/O base is zero\n");
421 err = -ENODEV; 422 err = -ENODEV;
422 goto err_out_disable; 423 goto err_out_disable;
423 } 424 }
424 425
425 if (pci_resource_len(pdev, 0) < (CHK_IO_SIZE(pdev)) ) { 426 if (pci_resource_len(pdev, 0) < (CHK_IO_SIZE(pdev)) ) {
426 printk(KERN_ERR DRV_NAME ": Allocated I/O size too small\n"); 427 pr_err("Allocated I/O size too small\n");
427 err = -ENODEV; 428 err = -ENODEV;
428 goto err_out_disable; 429 goto err_out_disable;
429 } 430 }
@@ -438,7 +439,7 @@ static int __devinit dmfe_init_one (struct pci_dev *pdev,
438#endif 439#endif
439 440
440 if (pci_request_regions(pdev, DRV_NAME)) { 441 if (pci_request_regions(pdev, DRV_NAME)) {
441 printk(KERN_ERR DRV_NAME ": Failed to request PCI regions\n"); 442 pr_err("Failed to request PCI regions\n");
442 err = -ENODEV; 443 err = -ENODEV;
443 goto err_out_disable; 444 goto err_out_disable;
444 } 445 }
@@ -497,12 +498,9 @@ static int __devinit dmfe_init_one (struct pci_dev *pdev,
497 if (err) 498 if (err)
498 goto err_out_free_buf; 499 goto err_out_free_buf;
499 500
500 printk(KERN_INFO "%s: Davicom DM%04lx at pci%s, %pM, irq %d.\n", 501 dev_info(&dev->dev, "Davicom DM%04lx at pci%s, %pM, irq %d\n",
501 dev->name, 502 ent->driver_data >> 16,
502 ent->driver_data >> 16, 503 pci_name(pdev), dev->dev_addr, dev->irq);
503 pci_name(pdev),
504 dev->dev_addr,
505 dev->irq);
506 504
507 pci_set_master(pdev); 505 pci_set_master(pdev);
508 506
@@ -696,7 +694,7 @@ static netdev_tx_t dmfe_start_xmit(struct sk_buff *skb,
696 694
697 /* Too large packet check */ 695 /* Too large packet check */
698 if (skb->len > MAX_PACKET_SIZE) { 696 if (skb->len > MAX_PACKET_SIZE) {
699 printk(KERN_ERR DRV_NAME ": big packet = %d\n", (u16)skb->len); 697 pr_err("big packet = %d\n", (u16)skb->len);
700 dev_kfree_skb(skb); 698 dev_kfree_skb(skb);
701 return NETDEV_TX_OK; 699 return NETDEV_TX_OK;
702 } 700 }
@@ -706,8 +704,7 @@ static netdev_tx_t dmfe_start_xmit(struct sk_buff *skb,
706 /* No Tx resource check, it never happen nromally */ 704 /* No Tx resource check, it never happen nromally */
707 if (db->tx_queue_cnt >= TX_FREE_DESC_CNT) { 705 if (db->tx_queue_cnt >= TX_FREE_DESC_CNT) {
708 spin_unlock_irqrestore(&db->lock, flags); 706 spin_unlock_irqrestore(&db->lock, flags);
709 printk(KERN_ERR DRV_NAME ": No Tx resource %ld\n", 707 pr_err("No Tx resource %ld\n", db->tx_queue_cnt);
710 db->tx_queue_cnt);
711 return NETDEV_TX_BUSY; 708 return NETDEV_TX_BUSY;
712 } 709 }
713 710
@@ -779,12 +776,11 @@ static int dmfe_stop(struct DEVICE *dev)
779 776
780#if 0 777#if 0
781 /* show statistic counter */ 778 /* show statistic counter */
782 printk(DRV_NAME ": FU:%lx EC:%lx LC:%lx NC:%lx" 779 printk("FU:%lx EC:%lx LC:%lx NC:%lx LOC:%lx TXJT:%lx RESET:%lx RCR8:%lx FAL:%lx TT:%lx\n",
783 " LOC:%lx TXJT:%lx RESET:%lx RCR8:%lx FAL:%lx TT:%lx\n", 780 db->tx_fifo_underrun, db->tx_excessive_collision,
784 db->tx_fifo_underrun, db->tx_excessive_collision, 781 db->tx_late_collision, db->tx_no_carrier, db->tx_loss_carrier,
785 db->tx_late_collision, db->tx_no_carrier, db->tx_loss_carrier, 782 db->tx_jabber_timeout, db->reset_count, db->reset_cr8,
786 db->tx_jabber_timeout, db->reset_count, db->reset_cr8, 783 db->reset_fatal, db->reset_TXtimeout);
787 db->reset_fatal, db->reset_TXtimeout);
788#endif 784#endif
789 785
790 return 0; 786 return 0;
@@ -885,7 +881,7 @@ static void dmfe_free_tx_pkt(struct DEVICE *dev, struct dmfe_board_info * db)
885 txptr = db->tx_remove_ptr; 881 txptr = db->tx_remove_ptr;
886 while(db->tx_packet_cnt) { 882 while(db->tx_packet_cnt) {
887 tdes0 = le32_to_cpu(txptr->tdes0); 883 tdes0 = le32_to_cpu(txptr->tdes0);
888 /* printk(DRV_NAME ": tdes0=%x\n", tdes0); */ 884 pr_debug("tdes0=%x\n", tdes0);
889 if (tdes0 & 0x80000000) 885 if (tdes0 & 0x80000000)
890 break; 886 break;
891 887
@@ -895,7 +891,7 @@ static void dmfe_free_tx_pkt(struct DEVICE *dev, struct dmfe_board_info * db)
895 891
896 /* Transmit statistic counter */ 892 /* Transmit statistic counter */
897 if ( tdes0 != 0x7fffffff ) { 893 if ( tdes0 != 0x7fffffff ) {
898 /* printk(DRV_NAME ": tdes0=%x\n", tdes0); */ 894 pr_debug("tdes0=%x\n", tdes0);
899 dev->stats.collisions += (tdes0 >> 3) & 0xf; 895 dev->stats.collisions += (tdes0 >> 3) & 0xf;
900 dev->stats.tx_bytes += le32_to_cpu(txptr->tdes1) & 0x7ff; 896 dev->stats.tx_bytes += le32_to_cpu(txptr->tdes1) & 0x7ff;
901 if (tdes0 & TDES0_ERR_MASK) { 897 if (tdes0 & TDES0_ERR_MASK) {
@@ -992,7 +988,7 @@ static void dmfe_rx_packet(struct DEVICE *dev, struct dmfe_board_info * db)
992 /* error summary bit check */ 988 /* error summary bit check */
993 if (rdes0 & 0x8000) { 989 if (rdes0 & 0x8000) {
994 /* This is a error packet */ 990 /* This is a error packet */
995 //printk(DRV_NAME ": rdes0: %lx\n", rdes0); 991 pr_debug("rdes0: %x\n", rdes0);
996 dev->stats.rx_errors++; 992 dev->stats.rx_errors++;
997 if (rdes0 & 1) 993 if (rdes0 & 1)
998 dev->stats.rx_fifo_errors++; 994 dev->stats.rx_fifo_errors++;
@@ -1191,8 +1187,7 @@ static void dmfe_timer(unsigned long data)
1191 if ( time_after(jiffies, dev->trans_start + DMFE_TX_TIMEOUT) ) { 1187 if ( time_after(jiffies, dev->trans_start + DMFE_TX_TIMEOUT) ) {
1192 db->reset_TXtimeout++; 1188 db->reset_TXtimeout++;
1193 db->wait_reset = 1; 1189 db->wait_reset = 1;
1194 printk(KERN_WARNING "%s: Tx timeout - resetting\n", 1190 dev_warn(&dev->dev, "Tx timeout - resetting\n");
1195 dev->name);
1196 } 1191 }
1197 } 1192 }
1198 1193
@@ -1646,7 +1641,7 @@ static u8 dmfe_sense_speed(struct dmfe_board_info * db)
1646 else /* DM9102/DM9102A */ 1641 else /* DM9102/DM9102A */
1647 phy_mode = phy_read(db->ioaddr, 1642 phy_mode = phy_read(db->ioaddr,
1648 db->phy_addr, 17, db->chip_id) & 0xf000; 1643 db->phy_addr, 17, db->chip_id) & 0xf000;
1649 /* printk(DRV_NAME ": Phy_mode %x ",phy_mode); */ 1644 pr_debug("Phy_mode %x\n", phy_mode);
1650 switch (phy_mode) { 1645 switch (phy_mode) {
1651 case 0x1000: db->op_mode = DMFE_10MHF; break; 1646 case 0x1000: db->op_mode = DMFE_10MHF; break;
1652 case 0x2000: db->op_mode = DMFE_10MFD; break; 1647 case 0x2000: db->op_mode = DMFE_10MFD; break;
@@ -2089,7 +2084,7 @@ static void dmfe_HPNA_remote_cmd_chk(struct dmfe_board_info * db)
2089 2084
2090 2085
2091 2086
2092static struct pci_device_id dmfe_pci_tbl[] = { 2087static DEFINE_PCI_DEVICE_TABLE(dmfe_pci_tbl) = {
2093 { 0x1282, 0x9132, PCI_ANY_ID, PCI_ANY_ID, 0, 0, PCI_DM9132_ID }, 2088 { 0x1282, 0x9132, PCI_ANY_ID, PCI_ANY_ID, 0, 0, PCI_DM9132_ID },
2094 { 0x1282, 0x9102, PCI_ANY_ID, PCI_ANY_ID, 0, 0, PCI_DM9102_ID }, 2089 { 0x1282, 0x9102, PCI_ANY_ID, PCI_ANY_ID, 0, 0, PCI_DM9102_ID },
2095 { 0x1282, 0x9100, PCI_ANY_ID, PCI_ANY_ID, 0, 0, PCI_DM9100_ID }, 2090 { 0x1282, 0x9100, PCI_ANY_ID, PCI_ANY_ID, 0, 0, PCI_DM9100_ID },
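
The dmfe.c changes lean on the pr_* helpers instead: defining pr_fmt() once near the top of the file gives every pr_err()/pr_info()/pr_warning() call the module-name prefix that the old code repeated as DRV_NAME in each format string, and the formerly commented-out printk()s become pr_debug(), which compiles away unless DEBUG or dynamic debug is enabled. A small sketch of the mechanism, assuming an ordinary module build (the example() function is illustrative only):

	/* pr_fmt() is defined before the kernel headers are included so the
	 * default "#define pr_fmt(fmt) fmt" in <linux/kernel.h> does not
	 * take effect first. */
	#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

	#include <linux/kernel.h>

	static void example(void)
	{
		/* Expands to printk(KERN_ERR "dmfe: I/O base is zero\n") when the
		 * module is built as dmfe.ko (KBUILD_MODNAME is set by the build). */
		pr_err("I/O base is zero\n");

		/* Silent in a normal build; enabled with -DDEBUG or dynamic debug. */
		pr_debug("tdes0=%x\n", 0x7fffffff);
	}
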
diff --git a/drivers/net/tulip/eeprom.c b/drivers/net/tulip/eeprom.c
index 889f57aae89..93f4e8309f8 100644
--- a/drivers/net/tulip/eeprom.c
+++ b/drivers/net/tulip/eeprom.c
@@ -161,15 +161,15 @@ void __devinit tulip_parse_eeprom(struct net_device *dev)
161 if (ee_data[0] == 0xff) { 161 if (ee_data[0] == 0xff) {
162 if (last_mediatable) { 162 if (last_mediatable) {
163 controller_index++; 163 controller_index++;
164 printk(KERN_INFO "%s: Controller %d of multiport board.\n", 164 dev_info(&dev->dev,
165 dev->name, controller_index); 165 "Controller %d of multiport board\n",
166 controller_index);
166 tp->mtable = last_mediatable; 167 tp->mtable = last_mediatable;
167 ee_data = last_ee_data; 168 ee_data = last_ee_data;
168 goto subsequent_board; 169 goto subsequent_board;
169 } else 170 } else
170 printk(KERN_INFO "%s: Missing EEPROM, this interface may " 171 dev_info(&dev->dev,
171 "not work correctly!\n", 172 "Missing EEPROM, this interface may not work correctly!\n");
172 dev->name);
173 return; 173 return;
174 } 174 }
175 /* Do a fix-up based on the vendor half of the station address prefix. */ 175 /* Do a fix-up based on the vendor half of the station address prefix. */
@@ -181,16 +181,15 @@ void __devinit tulip_parse_eeprom(struct net_device *dev)
181 i++; /* An Accton EN1207, not an outlaw Maxtech. */ 181 i++; /* An Accton EN1207, not an outlaw Maxtech. */
182 memcpy(ee_data + 26, eeprom_fixups[i].newtable, 182 memcpy(ee_data + 26, eeprom_fixups[i].newtable,
183 sizeof(eeprom_fixups[i].newtable)); 183 sizeof(eeprom_fixups[i].newtable));
184 printk(KERN_INFO "%s: Old format EEPROM on '%s' board. Using" 184 dev_info(&dev->dev,
185 " substitute media control info.\n", 185 "Old format EEPROM on '%s' board. Using substitute media control info\n",
186 dev->name, eeprom_fixups[i].name); 186 eeprom_fixups[i].name);
187 break; 187 break;
188 } 188 }
189 } 189 }
190 if (eeprom_fixups[i].name == NULL) { /* No fixup found. */ 190 if (eeprom_fixups[i].name == NULL) { /* No fixup found. */
191 printk(KERN_INFO "%s: Old style EEPROM with no media selection " 191 dev_info(&dev->dev,
192 "information.\n", 192 "Old style EEPROM with no media selection information\n");
193 dev->name);
194 return; 193 return;
195 } 194 }
196 } 195 }
@@ -218,7 +217,8 @@ subsequent_board:
218 /* there is no phy information, don't even try to build mtable */ 217 /* there is no phy information, don't even try to build mtable */
219 if (count == 0) { 218 if (count == 0) {
220 if (tulip_debug > 0) 219 if (tulip_debug > 0)
221 printk(KERN_WARNING "%s: no phy info, aborting mtable build\n", dev->name); 220 dev_warn(&dev->dev,
221 "no phy info, aborting mtable build\n");
222 return; 222 return;
223 } 223 }
224 224
@@ -234,8 +234,8 @@ subsequent_board:
234 mtable->has_nonmii = mtable->has_mii = mtable->has_reset = 0; 234 mtable->has_nonmii = mtable->has_mii = mtable->has_reset = 0;
235 mtable->csr15dir = mtable->csr15val = 0; 235 mtable->csr15dir = mtable->csr15val = 0;
236 236
237 printk(KERN_INFO "%s: EEPROM default media type %s.\n", dev->name, 237 dev_info(&dev->dev, "EEPROM default media type %s\n",
238 media & 0x0800 ? "Autosense" : medianame[media & MEDIA_MASK]); 238 media & 0x0800 ? "Autosense" : medianame[media & MEDIA_MASK]);
239 for (i = 0; i < count; i++) { 239 for (i = 0; i < count; i++) {
240 struct medialeaf *leaf = &mtable->mleaf[i]; 240 struct medialeaf *leaf = &mtable->mleaf[i];
241 241
@@ -298,16 +298,17 @@ subsequent_board:
298 } 298 }
299 if (tulip_debug > 1 && leaf->media == 11) { 299 if (tulip_debug > 1 && leaf->media == 11) {
300 unsigned char *bp = leaf->leafdata; 300 unsigned char *bp = leaf->leafdata;
301 printk(KERN_INFO "%s: MII interface PHY %d, setup/reset " 301 dev_info(&dev->dev,
302 "sequences %d/%d long, capabilities %2.2x %2.2x.\n", 302 "MII interface PHY %d, setup/reset sequences %d/%d long, capabilities %02x %02x\n",
303 dev->name, bp[0], bp[1], bp[2 + bp[1]*2], 303 bp[0], bp[1], bp[2 + bp[1]*2],
304 bp[5 + bp[2 + bp[1]*2]*2], bp[4 + bp[2 + bp[1]*2]*2]); 304 bp[5 + bp[2 + bp[1]*2]*2],
305 bp[4 + bp[2 + bp[1]*2]*2]);
305 } 306 }
306 printk(KERN_INFO "%s: Index #%d - Media %s (#%d) described " 307 dev_info(&dev->dev,
307 "by a %s (%d) block.\n", 308 "Index #%d - Media %s (#%d) described by a %s (%d) block\n",
308 dev->name, i, medianame[leaf->media & 15], leaf->media, 309 i, medianame[leaf->media & 15], leaf->media,
309 leaf->type < ARRAY_SIZE(block_name) ? block_name[leaf->type] : "<unknown>", 310 leaf->type < ARRAY_SIZE(block_name) ? block_name[leaf->type] : "<unknown>",
310 leaf->type); 311 leaf->type);
311 } 312 }
312 if (new_advertise) 313 if (new_advertise)
313 tp->sym_advertise = new_advertise; 314 tp->sym_advertise = new_advertise;
diff --git a/drivers/net/tulip/interrupt.c b/drivers/net/tulip/interrupt.c
index 2e8e8ee893c..1faf7a4d720 100644
--- a/drivers/net/tulip/interrupt.c
+++ b/drivers/net/tulip/interrupt.c
@@ -125,12 +125,12 @@ int tulip_poll(struct napi_struct *napi, int budget)
125#endif 125#endif
126 126
127 if (tulip_debug > 4) 127 if (tulip_debug > 4)
128 printk(KERN_DEBUG " In tulip_rx(), entry %d %8.8x.\n", entry, 128 printk(KERN_DEBUG " In tulip_rx(), entry %d %08x\n",
129 tp->rx_ring[entry].status); 129 entry, tp->rx_ring[entry].status);
130 130
131 do { 131 do {
132 if (ioread32(tp->base_addr + CSR5) == 0xffffffff) { 132 if (ioread32(tp->base_addr + CSR5) == 0xffffffff) {
133 printk(KERN_DEBUG " In tulip_poll(), hardware disappeared.\n"); 133 printk(KERN_DEBUG " In tulip_poll(), hardware disappeared\n");
134 break; 134 break;
135 } 135 }
136 /* Acknowledge current RX interrupt sources. */ 136 /* Acknowledge current RX interrupt sources. */
@@ -146,7 +146,7 @@ int tulip_poll(struct napi_struct *napi, int budget)
146 break; 146 break;
147 147
148 if (tulip_debug > 5) 148 if (tulip_debug > 5)
149 printk(KERN_DEBUG "%s: In tulip_rx(), entry %d %8.8x.\n", 149 printk(KERN_DEBUG "%s: In tulip_rx(), entry %d %08x\n",
150 dev->name, entry, status); 150 dev->name, entry, status);
151 151
152 if (++work_done >= budget) 152 if (++work_done >= budget)
@@ -177,15 +177,15 @@ int tulip_poll(struct napi_struct *napi, int budget)
177 /* Ingore earlier buffers. */ 177 /* Ingore earlier buffers. */
178 if ((status & 0xffff) != 0x7fff) { 178 if ((status & 0xffff) != 0x7fff) {
179 if (tulip_debug > 1) 179 if (tulip_debug > 1)
180 printk(KERN_WARNING "%s: Oversized Ethernet frame " 180 dev_warn(&dev->dev,
181 "spanned multiple buffers, status %8.8x!\n", 181 "Oversized Ethernet frame spanned multiple buffers, status %08x!\n",
182 dev->name, status); 182 status);
183 tp->stats.rx_length_errors++; 183 tp->stats.rx_length_errors++;
184 } 184 }
185 } else { 185 } else {
186 /* There was a fatal error. */ 186 /* There was a fatal error. */
187 if (tulip_debug > 2) 187 if (tulip_debug > 2)
188 printk(KERN_DEBUG "%s: Receive error, Rx status %8.8x.\n", 188 printk(KERN_DEBUG "%s: Receive error, Rx status %08x\n",
189 dev->name, status); 189 dev->name, status);
190 tp->stats.rx_errors++; /* end of a packet.*/ 190 tp->stats.rx_errors++; /* end of a packet.*/
191 if (pkt_len > 1518 || 191 if (pkt_len > 1518 ||
@@ -226,12 +226,11 @@ int tulip_poll(struct napi_struct *napi, int budget)
226#ifndef final_version 226#ifndef final_version
227 if (tp->rx_buffers[entry].mapping != 227 if (tp->rx_buffers[entry].mapping !=
228 le32_to_cpu(tp->rx_ring[entry].buffer1)) { 228 le32_to_cpu(tp->rx_ring[entry].buffer1)) {
229 printk(KERN_ERR "%s: Internal fault: The skbuff addresses " 229 dev_err(&dev->dev,
230 "do not match in tulip_rx: %08x vs. %08llx %p / %p.\n", 230 "Internal fault: The skbuff addresses do not match in tulip_rx: %08x vs. %08llx %p / %p\n",
231 dev->name, 231 le32_to_cpu(tp->rx_ring[entry].buffer1),
232 le32_to_cpu(tp->rx_ring[entry].buffer1), 232 (unsigned long long)tp->rx_buffers[entry].mapping,
233 (unsigned long long)tp->rx_buffers[entry].mapping, 233 skb->head, temp);
234 skb->head, temp);
235 } 234 }
236#endif 235#endif
237 236
@@ -365,16 +364,16 @@ static int tulip_rx(struct net_device *dev)
365 int received = 0; 364 int received = 0;
366 365
367 if (tulip_debug > 4) 366 if (tulip_debug > 4)
368 printk(KERN_DEBUG " In tulip_rx(), entry %d %8.8x.\n", entry, 367 printk(KERN_DEBUG " In tulip_rx(), entry %d %08x\n",
369 tp->rx_ring[entry].status); 368 entry, tp->rx_ring[entry].status);
370 /* If we own the next entry, it is a new packet. Send it up. */ 369 /* If we own the next entry, it is a new packet. Send it up. */
371 while ( ! (tp->rx_ring[entry].status & cpu_to_le32(DescOwned))) { 370 while ( ! (tp->rx_ring[entry].status & cpu_to_le32(DescOwned))) {
372 s32 status = le32_to_cpu(tp->rx_ring[entry].status); 371 s32 status = le32_to_cpu(tp->rx_ring[entry].status);
373 short pkt_len; 372 short pkt_len;
374 373
375 if (tulip_debug > 5) 374 if (tulip_debug > 5)
376 printk(KERN_DEBUG "%s: In tulip_rx(), entry %d %8.8x.\n", 375 printk(KERN_DEBUG "%s: In tulip_rx(), entry %d %08x\n",
377 dev->name, entry, status); 376 dev->name, entry, status);
378 if (--rx_work_limit < 0) 377 if (--rx_work_limit < 0)
379 break; 378 break;
380 379
@@ -402,16 +401,16 @@ static int tulip_rx(struct net_device *dev)
402 /* Ingore earlier buffers. */ 401 /* Ingore earlier buffers. */
403 if ((status & 0xffff) != 0x7fff) { 402 if ((status & 0xffff) != 0x7fff) {
404 if (tulip_debug > 1) 403 if (tulip_debug > 1)
405 printk(KERN_WARNING "%s: Oversized Ethernet frame " 404 dev_warn(&dev->dev,
406 "spanned multiple buffers, status %8.8x!\n", 405 "Oversized Ethernet frame spanned multiple buffers, status %08x!\n",
407 dev->name, status); 406 status);
408 tp->stats.rx_length_errors++; 407 tp->stats.rx_length_errors++;
409 } 408 }
410 } else { 409 } else {
411 /* There was a fatal error. */ 410 /* There was a fatal error. */
412 if (tulip_debug > 2) 411 if (tulip_debug > 2)
413 printk(KERN_DEBUG "%s: Receive error, Rx status %8.8x.\n", 412 printk(KERN_DEBUG "%s: Receive error, Rx status %08x\n",
414 dev->name, status); 413 dev->name, status);
415 tp->stats.rx_errors++; /* end of a packet.*/ 414 tp->stats.rx_errors++; /* end of a packet.*/
416 if (pkt_len > 1518 || 415 if (pkt_len > 1518 ||
417 (status & RxDescRunt)) 416 (status & RxDescRunt))
@@ -450,12 +449,11 @@ static int tulip_rx(struct net_device *dev)
450#ifndef final_version 449#ifndef final_version
451 if (tp->rx_buffers[entry].mapping != 450 if (tp->rx_buffers[entry].mapping !=
452 le32_to_cpu(tp->rx_ring[entry].buffer1)) { 451 le32_to_cpu(tp->rx_ring[entry].buffer1)) {
453 printk(KERN_ERR "%s: Internal fault: The skbuff addresses " 452 dev_err(&dev->dev,
454 "do not match in tulip_rx: %08x vs. %Lx %p / %p.\n", 453 "Internal fault: The skbuff addresses do not match in tulip_rx: %08x vs. %Lx %p / %p\n",
455 dev->name, 454 le32_to_cpu(tp->rx_ring[entry].buffer1),
456 le32_to_cpu(tp->rx_ring[entry].buffer1), 455 (long long)tp->rx_buffers[entry].mapping,
457 (long long)tp->rx_buffers[entry].mapping, 456 skb->head, temp);
458 skb->head, temp);
459 } 457 }
460#endif 458#endif
461 459
@@ -569,7 +567,7 @@ irqreturn_t tulip_interrupt(int irq, void *dev_instance)
569#endif /* CONFIG_TULIP_NAPI */ 567#endif /* CONFIG_TULIP_NAPI */
570 568
571 if (tulip_debug > 4) 569 if (tulip_debug > 4)
572 printk(KERN_DEBUG "%s: interrupt csr5=%#8.8x new csr5=%#8.8x.\n", 570 printk(KERN_DEBUG "%s: interrupt csr5=%#8.8x new csr5=%#8.8x\n",
573 dev->name, csr5, ioread32(ioaddr + CSR5)); 571 dev->name, csr5, ioread32(ioaddr + CSR5));
574 572
575 573
@@ -601,8 +599,8 @@ irqreturn_t tulip_interrupt(int irq, void *dev_instance)
601 /* There was an major error, log it. */ 599 /* There was an major error, log it. */
602#ifndef final_version 600#ifndef final_version
603 if (tulip_debug > 1) 601 if (tulip_debug > 1)
604 printk(KERN_DEBUG "%s: Transmit error, Tx status %8.8x.\n", 602 printk(KERN_DEBUG "%s: Transmit error, Tx status %08x\n",
605 dev->name, status); 603 dev->name, status);
606#endif 604#endif
607 tp->stats.tx_errors++; 605 tp->stats.tx_errors++;
608 if (status & 0x4104) tp->stats.tx_aborted_errors++; 606 if (status & 0x4104) tp->stats.tx_aborted_errors++;
@@ -631,8 +629,9 @@ irqreturn_t tulip_interrupt(int irq, void *dev_instance)
631 629
632#ifndef final_version 630#ifndef final_version
633 if (tp->cur_tx - dirty_tx > TX_RING_SIZE) { 631 if (tp->cur_tx - dirty_tx > TX_RING_SIZE) {
634 printk(KERN_ERR "%s: Out-of-sync dirty pointer, %d vs. %d.\n", 632 dev_err(&dev->dev,
635 dev->name, dirty_tx, tp->cur_tx); 633 "Out-of-sync dirty pointer, %d vs. %d\n",
634 dirty_tx, tp->cur_tx);
636 dirty_tx += TX_RING_SIZE; 635 dirty_tx += TX_RING_SIZE;
637 } 636 }
638#endif 637#endif
@@ -643,9 +642,10 @@ irqreturn_t tulip_interrupt(int irq, void *dev_instance)
643 tp->dirty_tx = dirty_tx; 642 tp->dirty_tx = dirty_tx;
644 if (csr5 & TxDied) { 643 if (csr5 & TxDied) {
645 if (tulip_debug > 2) 644 if (tulip_debug > 2)
646 printk(KERN_WARNING "%s: The transmitter stopped." 645 dev_warn(&dev->dev,
647 " CSR5 is %x, CSR6 %x, new CSR6 %x.\n", 646 "The transmitter stopped. CSR5 is %x, CSR6 %x, new CSR6 %x\n",
648 dev->name, csr5, ioread32(ioaddr + CSR6), tp->csr6); 647 csr5, ioread32(ioaddr + CSR6),
648 tp->csr6);
649 tulip_restart_rxtx(tp); 649 tulip_restart_rxtx(tp);
650 } 650 }
651 spin_unlock(&tp->lock); 651 spin_unlock(&tp->lock);
@@ -696,8 +696,9 @@ irqreturn_t tulip_interrupt(int irq, void *dev_instance)
696 * to the 21142/3 docs that is). 696 * to the 21142/3 docs that is).
697 * -- rmk 697 * -- rmk
698 */ 698 */
699 printk(KERN_ERR "%s: (%lu) System Error occurred (%d)\n", 699 dev_err(&dev->dev,
700 dev->name, tp->nir, error); 700 "(%lu) System Error occurred (%d)\n",
701 tp->nir, error);
701 } 702 }
702 /* Clear all error sources, included undocumented ones! */ 703 /* Clear all error sources, included undocumented ones! */
703 iowrite32(0x0800f7ba, ioaddr + CSR5); 704 iowrite32(0x0800f7ba, ioaddr + CSR5);
@@ -706,16 +707,17 @@ irqreturn_t tulip_interrupt(int irq, void *dev_instance)
706 if (csr5 & TimerInt) { 707 if (csr5 & TimerInt) {
707 708
708 if (tulip_debug > 2) 709 if (tulip_debug > 2)
709 printk(KERN_ERR "%s: Re-enabling interrupts, %8.8x.\n", 710 dev_err(&dev->dev,
710 dev->name, csr5); 711 "Re-enabling interrupts, %08x\n",
712 csr5);
711 iowrite32(tulip_tbl[tp->chip_id].valid_intrs, ioaddr + CSR7); 713 iowrite32(tulip_tbl[tp->chip_id].valid_intrs, ioaddr + CSR7);
712 tp->ttimer = 0; 714 tp->ttimer = 0;
713 oi++; 715 oi++;
714 } 716 }
715 if (tx > maxtx || rx > maxrx || oi > maxoi) { 717 if (tx > maxtx || rx > maxrx || oi > maxoi) {
716 if (tulip_debug > 1) 718 if (tulip_debug > 1)
717 printk(KERN_WARNING "%s: Too much work during an interrupt, " 719 dev_warn(&dev->dev, "Too much work during an interrupt, csr5=0x%08x. (%lu) (%d,%d,%d)\n",
718 "csr5=0x%8.8x. (%lu) (%d,%d,%d)\n", dev->name, csr5, tp->nir, tx, rx, oi); 720 csr5, tp->nir, tx, rx, oi);
719 721
720 /* Acknowledge all interrupt sources. */ 722 /* Acknowledge all interrupt sources. */
721 iowrite32(0x8001ffff, ioaddr + CSR5); 723 iowrite32(0x8001ffff, ioaddr + CSR5);
@@ -764,14 +766,18 @@ irqreturn_t tulip_interrupt(int irq, void *dev_instance)
764 entry = tp->dirty_rx % RX_RING_SIZE; 766 entry = tp->dirty_rx % RX_RING_SIZE;
765 if (tp->rx_buffers[entry].skb == NULL) { 767 if (tp->rx_buffers[entry].skb == NULL) {
766 if (tulip_debug > 1) 768 if (tulip_debug > 1)
767 printk(KERN_WARNING "%s: in rx suspend mode: (%lu) (tp->cur_rx = %u, ttimer = %d, rx = %d) go/stay in suspend mode\n", dev->name, tp->nir, tp->cur_rx, tp->ttimer, rx); 769 dev_warn(&dev->dev,
770 "in rx suspend mode: (%lu) (tp->cur_rx = %u, ttimer = %d, rx = %d) go/stay in suspend mode\n",
771 tp->nir, tp->cur_rx, tp->ttimer, rx);
768 if (tp->chip_id == LC82C168) { 772 if (tp->chip_id == LC82C168) {
769 iowrite32(0x00, ioaddr + CSR7); 773 iowrite32(0x00, ioaddr + CSR7);
770 mod_timer(&tp->timer, RUN_AT(HZ/50)); 774 mod_timer(&tp->timer, RUN_AT(HZ/50));
771 } else { 775 } else {
772 if (tp->ttimer == 0 || (ioread32(ioaddr + CSR11) & 0xffff) == 0) { 776 if (tp->ttimer == 0 || (ioread32(ioaddr + CSR11) & 0xffff) == 0) {
773 if (tulip_debug > 1) 777 if (tulip_debug > 1)
774 printk(KERN_WARNING "%s: in rx suspend mode: (%lu) set timer\n", dev->name, tp->nir); 778 dev_warn(&dev->dev,
779 "in rx suspend mode: (%lu) set timer\n",
780 tp->nir);
775 iowrite32(tulip_tbl[tp->chip_id].valid_intrs | TimerInt, 781 iowrite32(tulip_tbl[tp->chip_id].valid_intrs | TimerInt,
776 ioaddr + CSR7); 782 ioaddr + CSR7);
777 iowrite32(TimerInt, ioaddr + CSR5); 783 iowrite32(TimerInt, ioaddr + CSR5);
@@ -787,8 +793,8 @@ irqreturn_t tulip_interrupt(int irq, void *dev_instance)
787 } 793 }
788 794
789 if (tulip_debug > 4) 795 if (tulip_debug > 4)
790 printk(KERN_DEBUG "%s: exiting interrupt, csr5=%#4.4x.\n", 796 printk(KERN_DEBUG "%s: exiting interrupt, csr5=%#04x\n",
791 dev->name, ioread32(ioaddr + CSR5)); 797 dev->name, ioread32(ioaddr + CSR5));
792 798
793 return IRQ_HANDLED; 799 return IRQ_HANDLED;
794} 800}
diff --git a/drivers/net/tulip/media.c b/drivers/net/tulip/media.c
index d8fda83705b..68b170ae4d1 100644
--- a/drivers/net/tulip/media.c
+++ b/drivers/net/tulip/media.c
@@ -182,9 +182,8 @@ void tulip_select_media(struct net_device *dev, int startup)
182 switch (mleaf->type) { 182 switch (mleaf->type) {
183 case 0: /* 21140 non-MII xcvr. */ 183 case 0: /* 21140 non-MII xcvr. */
184 if (tulip_debug > 1) 184 if (tulip_debug > 1)
185 printk(KERN_DEBUG "%s: Using a 21140 non-MII transceiver" 185 printk(KERN_DEBUG "%s: Using a 21140 non-MII transceiver with control setting %02x\n",
186 " with control setting %2.2x.\n", 186 dev->name, p[1]);
187 dev->name, p[1]);
188 dev->if_port = p[0]; 187 dev->if_port = p[0];
189 if (startup) 188 if (startup)
190 iowrite32(mtable->csr12dir | 0x100, ioaddr + CSR12); 189 iowrite32(mtable->csr12dir | 0x100, ioaddr + CSR12);
@@ -205,15 +204,15 @@ void tulip_select_media(struct net_device *dev, int startup)
205 struct medialeaf *rleaf = &mtable->mleaf[mtable->has_reset]; 204 struct medialeaf *rleaf = &mtable->mleaf[mtable->has_reset];
206 unsigned char *rst = rleaf->leafdata; 205 unsigned char *rst = rleaf->leafdata;
207 if (tulip_debug > 1) 206 if (tulip_debug > 1)
208 printk(KERN_DEBUG "%s: Resetting the transceiver.\n", 207 printk(KERN_DEBUG "%s: Resetting the transceiver\n",
209 dev->name); 208 dev->name);
210 for (i = 0; i < rst[0]; i++) 209 for (i = 0; i < rst[0]; i++)
211 iowrite32(get_u16(rst + 1 + (i<<1)) << 16, ioaddr + CSR15); 210 iowrite32(get_u16(rst + 1 + (i<<1)) << 16, ioaddr + CSR15);
212 } 211 }
213 if (tulip_debug > 1) 212 if (tulip_debug > 1)
214 printk(KERN_DEBUG "%s: 21143 non-MII %s transceiver control " 213 printk(KERN_DEBUG "%s: 21143 non-MII %s transceiver control %04x/%04x\n",
215 "%4.4x/%4.4x.\n", 214 dev->name, medianame[dev->if_port],
216 dev->name, medianame[dev->if_port], setup[0], setup[1]); 215 setup[0], setup[1]);
217 if (p[0] & 0x40) { /* SIA (CSR13-15) setup values are provided. */ 216 if (p[0] & 0x40) { /* SIA (CSR13-15) setup values are provided. */
218 csr13val = setup[0]; 217 csr13val = setup[0];
219 csr14val = setup[1]; 218 csr14val = setup[1];
@@ -240,8 +239,8 @@ void tulip_select_media(struct net_device *dev, int startup)
240 if (startup) iowrite32(csr13val, ioaddr + CSR13); 239 if (startup) iowrite32(csr13val, ioaddr + CSR13);
241 } 240 }
242 if (tulip_debug > 1) 241 if (tulip_debug > 1)
243 printk(KERN_DEBUG "%s: Setting CSR15 to %8.8x/%8.8x.\n", 242 printk(KERN_DEBUG "%s: Setting CSR15 to %08x/%08x\n",
244 dev->name, csr15dir, csr15val); 243 dev->name, csr15dir, csr15val);
245 if (mleaf->type == 4) 244 if (mleaf->type == 4)
246 new_csr6 = 0x82020000 | ((setup[2] & 0x71) << 18); 245 new_csr6 = 0x82020000 | ((setup[2] & 0x71) << 18);
247 else 246 else
@@ -317,8 +316,9 @@ void tulip_select_media(struct net_device *dev, int startup)
317 if (tp->mii_advertise == 0) 316 if (tp->mii_advertise == 0)
318 tp->mii_advertise = tp->advertising[phy_num]; 317 tp->mii_advertise = tp->advertising[phy_num];
319 if (tulip_debug > 1) 318 if (tulip_debug > 1)
320 printk(KERN_DEBUG "%s: Advertising %4.4x on MII %d.\n", 319 printk(KERN_DEBUG "%s: Advertising %04x on MII %d\n",
321 dev->name, tp->mii_advertise, tp->phys[phy_num]); 320 dev->name, tp->mii_advertise,
321 tp->phys[phy_num]);
322 tulip_mdio_write(dev, tp->phys[phy_num], 4, tp->mii_advertise); 322 tulip_mdio_write(dev, tp->phys[phy_num], 4, tp->mii_advertise);
323 } 323 }
324 break; 324 break;
@@ -335,8 +335,8 @@ void tulip_select_media(struct net_device *dev, int startup)
335 struct medialeaf *rleaf = &mtable->mleaf[mtable->has_reset]; 335 struct medialeaf *rleaf = &mtable->mleaf[mtable->has_reset];
336 unsigned char *rst = rleaf->leafdata; 336 unsigned char *rst = rleaf->leafdata;
337 if (tulip_debug > 1) 337 if (tulip_debug > 1)
338 printk(KERN_DEBUG "%s: Resetting the transceiver.\n", 338 printk(KERN_DEBUG "%s: Resetting the transceiver\n",
339 dev->name); 339 dev->name);
340 for (i = 0; i < rst[0]; i++) 340 for (i = 0; i < rst[0]; i++)
341 iowrite32(get_u16(rst + 1 + (i<<1)) << 16, ioaddr + CSR15); 341 iowrite32(get_u16(rst + 1 + (i<<1)) << 16, ioaddr + CSR15);
342 } 342 }
@@ -344,20 +344,20 @@ void tulip_select_media(struct net_device *dev, int startup)
344 break; 344 break;
345 } 345 }
346 default: 346 default:
347 printk(KERN_DEBUG "%s: Invalid media table selection %d.\n", 347 printk(KERN_DEBUG "%s: Invalid media table selection %d\n",
348 dev->name, mleaf->type); 348 dev->name, mleaf->type);
349 new_csr6 = 0x020E0000; 349 new_csr6 = 0x020E0000;
350 } 350 }
351 if (tulip_debug > 1) 351 if (tulip_debug > 1)
352 printk(KERN_DEBUG "%s: Using media type %s, CSR12 is %2.2x.\n", 352 printk(KERN_DEBUG "%s: Using media type %s, CSR12 is %02x\n",
353 dev->name, medianame[dev->if_port], 353 dev->name, medianame[dev->if_port],
354 ioread32(ioaddr + CSR12) & 0xff); 354 ioread32(ioaddr + CSR12) & 0xff);
355 } else if (tp->chip_id == LC82C168) { 355 } else if (tp->chip_id == LC82C168) {
356 if (startup && ! tp->medialock) 356 if (startup && ! tp->medialock)
357 dev->if_port = tp->mii_cnt ? 11 : 0; 357 dev->if_port = tp->mii_cnt ? 11 : 0;
358 if (tulip_debug > 1) 358 if (tulip_debug > 1)
359 printk(KERN_DEBUG "%s: PNIC PHY status is %3.3x, media %s.\n", 359 printk(KERN_DEBUG "%s: PNIC PHY status is %3.3x, media %s\n",
360 dev->name, ioread32(ioaddr + 0xB8), medianame[dev->if_port]); 360 dev->name, ioread32(ioaddr + 0xB8), medianame[dev->if_port]);
361 if (tp->mii_cnt) { 361 if (tp->mii_cnt) {
362 new_csr6 = 0x810C0000; 362 new_csr6 = 0x810C0000;
363 iowrite32(0x0001, ioaddr + CSR15); 363 iowrite32(0x0001, ioaddr + CSR15);
@@ -388,10 +388,9 @@ void tulip_select_media(struct net_device *dev, int startup)
388 } else 388 } else
389 new_csr6 = 0x03860000; 389 new_csr6 = 0x03860000;
390 if (tulip_debug > 1) 390 if (tulip_debug > 1)
391 printk(KERN_DEBUG "%s: No media description table, assuming " 391 printk(KERN_DEBUG "%s: No media description table, assuming %s transceiver, CSR12 %02x\n",
392 "%s transceiver, CSR12 %2.2x.\n", 392 dev->name, medianame[dev->if_port],
393 dev->name, medianame[dev->if_port], 393 ioread32(ioaddr + CSR12));
394 ioread32(ioaddr + CSR12));
395 } 394 }
396 395
397 tp->csr6 = new_csr6 | (tp->csr6 & 0xfdff) | (tp->full_duplex ? 0x0200 : 0); 396 tp->csr6 = new_csr6 | (tp->csr6 & 0xfdff) | (tp->full_duplex ? 0x0200 : 0);
@@ -415,16 +414,17 @@ int tulip_check_duplex(struct net_device *dev)
415 bmsr = tulip_mdio_read(dev, tp->phys[0], MII_BMSR); 414 bmsr = tulip_mdio_read(dev, tp->phys[0], MII_BMSR);
416 lpa = tulip_mdio_read(dev, tp->phys[0], MII_LPA); 415 lpa = tulip_mdio_read(dev, tp->phys[0], MII_LPA);
417 if (tulip_debug > 1) 416 if (tulip_debug > 1)
418 printk(KERN_INFO "%s: MII status %4.4x, Link partner report " 417 dev_info(&dev->dev, "MII status %04x, Link partner report %04x\n",
419 "%4.4x.\n", dev->name, bmsr, lpa); 418 bmsr, lpa);
420 if (bmsr == 0xffff) 419 if (bmsr == 0xffff)
421 return -2; 420 return -2;
422 if ((bmsr & BMSR_LSTATUS) == 0) { 421 if ((bmsr & BMSR_LSTATUS) == 0) {
423 int new_bmsr = tulip_mdio_read(dev, tp->phys[0], MII_BMSR); 422 int new_bmsr = tulip_mdio_read(dev, tp->phys[0], MII_BMSR);
424 if ((new_bmsr & BMSR_LSTATUS) == 0) { 423 if ((new_bmsr & BMSR_LSTATUS) == 0) {
425 if (tulip_debug > 1) 424 if (tulip_debug > 1)
426 printk(KERN_INFO "%s: No link beat on the MII interface," 425 dev_info(&dev->dev,
427 " status %4.4x.\n", dev->name, new_bmsr); 426 "No link beat on the MII interface, status %04x\n",
427 new_bmsr);
428 return -1; 428 return -1;
429 } 429 }
430 } 430 }
@@ -443,10 +443,10 @@ int tulip_check_duplex(struct net_device *dev)
443 tulip_restart_rxtx(tp); 443 tulip_restart_rxtx(tp);
444 444
445 if (tulip_debug > 0) 445 if (tulip_debug > 0)
446 printk(KERN_INFO "%s: Setting %s-duplex based on MII" 446 dev_info(&dev->dev,
447 "#%d link partner capability of %4.4x.\n", 447 "Setting %s-duplex based on MII#%d link partner capability of %04x\n",
448 dev->name, tp->full_duplex ? "full" : "half", 448 tp->full_duplex ? "full" : "half",
449 tp->phys[0], lpa); 449 tp->phys[0], lpa);
450 return 1; 450 return 1;
451 } 451 }
452 452
@@ -501,15 +501,13 @@ void __devinit tulip_find_mii (struct net_device *dev, int board_idx)
501 501
502 tp->phys[phy_idx++] = phy; 502 tp->phys[phy_idx++] = phy;
503 503
504 printk (KERN_INFO "tulip%d: MII transceiver #%d " 504 pr_info("tulip%d: MII transceiver #%d config %04x status %04x advertising %04x\n",
505 "config %4.4x status %4.4x advertising %4.4x.\n",
506 board_idx, phy, mii_reg0, mii_status, mii_advert); 505 board_idx, phy, mii_reg0, mii_status, mii_advert);
507 506
508 /* Fixup for DLink with miswired PHY. */ 507 /* Fixup for DLink with miswired PHY. */
509 if (mii_advert != to_advert) { 508 if (mii_advert != to_advert) {
510 printk (KERN_DEBUG "tulip%d: Advertising %4.4x on PHY %d," 509 printk(KERN_DEBUG "tulip%d: Advertising %04x on PHY %d, previously advertising %04x\n",
511 " previously advertising %4.4x.\n", 510 board_idx, to_advert, phy, mii_advert);
512 board_idx, to_advert, phy, mii_advert);
513 tulip_mdio_write (dev, phy, 4, to_advert); 511 tulip_mdio_write (dev, phy, 4, to_advert);
514 } 512 }
515 513
@@ -554,7 +552,7 @@ void __devinit tulip_find_mii (struct net_device *dev, int board_idx)
554 } 552 }
555 tp->mii_cnt = phy_idx; 553 tp->mii_cnt = phy_idx;
556 if (tp->mtable && tp->mtable->has_mii && phy_idx == 0) { 554 if (tp->mtable && tp->mtable->has_mii && phy_idx == 0) {
557 printk (KERN_INFO "tulip%d: ***WARNING***: No MII transceiver found!\n", 555 pr_info("tulip%d: ***WARNING***: No MII transceiver found!\n",
558 board_idx); 556 board_idx);
559 tp->phys[0] = 1; 557 tp->phys[0] = 1;
560 } 558 }
diff --git a/drivers/net/tulip/pnic.c b/drivers/net/tulip/pnic.c
index d3253ed09df..966efa1a27d 100644
--- a/drivers/net/tulip/pnic.c
+++ b/drivers/net/tulip/pnic.c
@@ -40,8 +40,8 @@ void pnic_do_nway(struct net_device *dev)
40 new_csr6 |= 0x00000200; 40 new_csr6 |= 0x00000200;
41 } 41 }
42 if (tulip_debug > 1) 42 if (tulip_debug > 1)
43 printk(KERN_DEBUG "%s: PNIC autonegotiated status %8.8x, %s.\n", 43 printk(KERN_DEBUG "%s: PNIC autonegotiated status %08x, %s\n",
44 dev->name, phy_reg, medianame[dev->if_port]); 44 dev->name, phy_reg, medianame[dev->if_port]);
45 if (tp->csr6 != new_csr6) { 45 if (tp->csr6 != new_csr6) {
46 tp->csr6 = new_csr6; 46 tp->csr6 = new_csr6;
47 /* Restart Tx */ 47 /* Restart Tx */
@@ -58,8 +58,8 @@ void pnic_lnk_change(struct net_device *dev, int csr5)
58 int phy_reg = ioread32(ioaddr + 0xB8); 58 int phy_reg = ioread32(ioaddr + 0xB8);
59 59
60 if (tulip_debug > 1) 60 if (tulip_debug > 1)
61 printk(KERN_DEBUG "%s: PNIC link changed state %8.8x, CSR5 %8.8x.\n", 61 printk(KERN_DEBUG "%s: PNIC link changed state %08x, CSR5 %08x\n",
62 dev->name, phy_reg, csr5); 62 dev->name, phy_reg, csr5);
63 if (ioread32(ioaddr + CSR5) & TPLnkFail) { 63 if (ioread32(ioaddr + CSR5) & TPLnkFail) {
64 iowrite32((ioread32(ioaddr + CSR7) & ~TPLnkFail) | TPLnkPass, ioaddr + CSR7); 64 iowrite32((ioread32(ioaddr + CSR7) & ~TPLnkFail) | TPLnkPass, ioaddr + CSR7);
65 /* If we use an external MII, then we mustn't use the 65 /* If we use an external MII, then we mustn't use the
@@ -114,9 +114,8 @@ void pnic_timer(unsigned long data)
114 int csr5 = ioread32(ioaddr + CSR5); 114 int csr5 = ioread32(ioaddr + CSR5);
115 115
116 if (tulip_debug > 1) 116 if (tulip_debug > 1)
117 printk(KERN_DEBUG "%s: PNIC timer PHY status %8.8x, %s " 117 printk(KERN_DEBUG "%s: PNIC timer PHY status %08x, %s CSR5 %08x\n",
118 "CSR5 %8.8x.\n", 118 dev->name, phy_reg, medianame[dev->if_port], csr5);
119 dev->name, phy_reg, medianame[dev->if_port], csr5);
120 if (phy_reg & 0x04000000) { /* Remote link fault */ 119 if (phy_reg & 0x04000000) { /* Remote link fault */
121 iowrite32(0x0201F078, ioaddr + 0xB8); 120 iowrite32(0x0201F078, ioaddr + 0xB8);
122 next_tick = 1*HZ; 121 next_tick = 1*HZ;
@@ -126,10 +125,11 @@ void pnic_timer(unsigned long data)
126 next_tick = 60*HZ; 125 next_tick = 60*HZ;
127 } else if (csr5 & TPLnkFail) { /* 100baseTx link beat */ 126 } else if (csr5 & TPLnkFail) { /* 100baseTx link beat */
128 if (tulip_debug > 1) 127 if (tulip_debug > 1)
129 printk(KERN_DEBUG "%s: %s link beat failed, CSR12 %4.4x, " 128 printk(KERN_DEBUG "%s: %s link beat failed, CSR12 %04x, CSR5 %08x, PHY %03x\n",
130 "CSR5 %8.8x, PHY %3.3x.\n", 129 dev->name, medianame[dev->if_port],
131 dev->name, medianame[dev->if_port], csr12, 130 csr12,
132 ioread32(ioaddr + CSR5), ioread32(ioaddr + 0xB8)); 131 ioread32(ioaddr + CSR5),
132 ioread32(ioaddr + 0xB8));
133 next_tick = 3*HZ; 133 next_tick = 3*HZ;
134 if (tp->medialock) { 134 if (tp->medialock) {
135 } else if (tp->nwayset && (dev->if_port & 1)) { 135 } else if (tp->nwayset && (dev->if_port & 1)) {
@@ -151,10 +151,11 @@ void pnic_timer(unsigned long data)
151 tulip_restart_rxtx(tp); 151 tulip_restart_rxtx(tp);
152 dev->trans_start = jiffies; 152 dev->trans_start = jiffies;
153 if (tulip_debug > 1) 153 if (tulip_debug > 1)
154 printk(KERN_INFO "%s: Changing PNIC configuration to %s " 154 dev_info(&dev->dev,
155 "%s-duplex, CSR6 %8.8x.\n", 155 "Changing PNIC configuration to %s %s-duplex, CSR6 %08x\n",
156 dev->name, medianame[dev->if_port], 156 medianame[dev->if_port],
157 tp->full_duplex ? "full" : "half", new_csr6); 157 tp->full_duplex ? "full" : "half",
158 new_csr6);
158 } 159 }
159 } 160 }
160 } 161 }
@@ -162,7 +163,7 @@ too_good_connection:
162 mod_timer(&tp->timer, RUN_AT(next_tick)); 163 mod_timer(&tp->timer, RUN_AT(next_tick));
163 if(!ioread32(ioaddr + CSR7)) { 164 if(!ioread32(ioaddr + CSR7)) {
164 if (tulip_debug > 1) 165 if (tulip_debug > 1)
165 printk(KERN_INFO "%s: sw timer wakeup.\n", dev->name); 166 dev_info(&dev->dev, "sw timer wakeup\n");
166 disable_irq(dev->irq); 167 disable_irq(dev->irq);
167 tulip_refill_rx(dev); 168 tulip_refill_rx(dev);
168 enable_irq(dev->irq); 169 enable_irq(dev->irq);
diff --git a/drivers/net/tulip/pnic2.c b/drivers/net/tulip/pnic2.c
index d8418694bf4..b8197666021 100644
--- a/drivers/net/tulip/pnic2.c
+++ b/drivers/net/tulip/pnic2.c
@@ -87,8 +87,8 @@ void pnic2_timer(unsigned long data)
87 int next_tick = 60*HZ; 87 int next_tick = 60*HZ;
88 88
89 if (tulip_debug > 3) 89 if (tulip_debug > 3)
90 printk(KERN_INFO"%s: PNIC2 negotiation status %8.8x.\n", 90 dev_info(&dev->dev, "PNIC2 negotiation status %08x\n",
91 dev->name,ioread32(ioaddr + CSR12)); 91 ioread32(ioaddr + CSR12));
92 92
93 if (next_tick) { 93 if (next_tick) {
94 mod_timer(&tp->timer, RUN_AT(next_tick)); 94 mod_timer(&tp->timer, RUN_AT(next_tick));
@@ -125,8 +125,8 @@ void pnic2_start_nway(struct net_device *dev)
125 csr14 |= 0x00001184; 125 csr14 |= 0x00001184;
126 126
127 if (tulip_debug > 1) 127 if (tulip_debug > 1)
128 printk(KERN_DEBUG "%s: Restarting PNIC2 autonegotiation, " 128 printk(KERN_DEBUG "%s: Restarting PNIC2 autonegotiation, csr14=%08x\n",
129 "csr14=%8.8x.\n", dev->name, csr14); 129 dev->name, csr14);
130 130
131 /* tell pnic2_lnk_change we are doing an nway negotiation */ 131 /* tell pnic2_lnk_change we are doing an nway negotiation */
132 dev->if_port = 0; 132 dev->if_port = 0;
@@ -137,8 +137,8 @@ void pnic2_start_nway(struct net_device *dev)
137 137
138 tp->csr6 = ioread32(ioaddr + CSR6); 138 tp->csr6 = ioread32(ioaddr + CSR6);
139 if (tulip_debug > 1) 139 if (tulip_debug > 1)
140 printk(KERN_DEBUG "%s: On Entry to Nway, " 140 printk(KERN_DEBUG "%s: On Entry to Nway, csr6=%08x\n",
141 "csr6=%8.8x.\n", dev->name, tp->csr6); 141 dev->name, tp->csr6);
142 142
143 /* mask off any bits not to touch 143 /* mask off any bits not to touch
144 * comment at top of file explains mask value 144 * comment at top of file explains mask value
@@ -181,9 +181,9 @@ void pnic2_lnk_change(struct net_device *dev, int csr5)
181 int csr12 = ioread32(ioaddr + CSR12); 181 int csr12 = ioread32(ioaddr + CSR12);
182 182
183 if (tulip_debug > 1) 183 if (tulip_debug > 1)
184 printk(KERN_INFO"%s: PNIC2 link status interrupt %8.8x, " 184 dev_info(&dev->dev,
185 " CSR5 %x, %8.8x.\n", dev->name, csr12, 185 "PNIC2 link status interrupt %08x, CSR5 %x, %08x\n",
186 csr5, ioread32(ioaddr + CSR14)); 186 csr12, csr5, ioread32(ioaddr + CSR14));
187 187
188 /* If NWay finished and we have a negotiated partner capability. 188 /* If NWay finished and we have a negotiated partner capability.
189 * check bits 14:12 for bit pattern 101 - all is good 189 * check bits 14:12 for bit pattern 101 - all is good
@@ -215,9 +215,9 @@ void pnic2_lnk_change(struct net_device *dev, int csr5)
215 else if (negotiated & 0x0020) dev->if_port = 0; 215 else if (negotiated & 0x0020) dev->if_port = 0;
216 else { 216 else {
217 if (tulip_debug > 1) 217 if (tulip_debug > 1)
218 printk(KERN_INFO "%s: funny autonegotiate result " 218 dev_info(&dev->dev,
219 "csr12 %8.8x advertising %4.4x\n", 219 "funny autonegotiate result csr12 %08x advertising %04x\n",
220 dev->name, csr12, tp->sym_advertise); 220 csr12, tp->sym_advertise);
221 tp->nwayset = 0; 221 tp->nwayset = 0;
222 /* so check if 100baseTx link state is okay */ 222 /* so check if 100baseTx link state is okay */
223 if ((csr12 & 2) == 0 && (tp->sym_advertise & 0x0180)) 223 if ((csr12 & 2) == 0 && (tp->sym_advertise & 0x0180))
@@ -231,10 +231,11 @@ void pnic2_lnk_change(struct net_device *dev, int csr5)
231 231
232 if (tulip_debug > 1) { 232 if (tulip_debug > 1) {
233 if (tp->nwayset) 233 if (tp->nwayset)
234 printk(KERN_INFO "%s: Switching to %s based on link " 234 dev_info(&dev->dev,
235 "negotiation %4.4x & %4.4x = %4.4x.\n", 235 "Switching to %s based on link negotiation %04x & %04x = %04x\n",
236 dev->name, medianame[dev->if_port], 236 medianame[dev->if_port],
237 tp->sym_advertise, tp->lpar, negotiated); 237 tp->sym_advertise, tp->lpar,
238 negotiated);
238 } 239 }
239 240
240 /* remember to turn off bit 7 - autonegotiate 241 /* remember to turn off bit 7 - autonegotiate
@@ -270,9 +271,9 @@ void pnic2_lnk_change(struct net_device *dev, int csr5)
270 iowrite32(1, ioaddr + CSR13); 271 iowrite32(1, ioaddr + CSR13);
271 272
272 if (tulip_debug > 2) 273 if (tulip_debug > 2)
273 printk(KERN_DEBUG "%s: Setting CSR6 %8.8x/%x CSR12 " 274 printk(KERN_DEBUG "%s: Setting CSR6 %08x/%x CSR12 %08x\n",
274 "%8.8x.\n", dev->name, tp->csr6, 275 dev->name, tp->csr6,
275 ioread32(ioaddr + CSR6), ioread32(ioaddr + CSR12)); 276 ioread32(ioaddr + CSR6), ioread32(ioaddr + CSR12));
276 277
277 /* now the following actually writes out the 278 /* now the following actually writes out the
278 * new csr6 values 279 * new csr6 values
@@ -282,9 +283,9 @@ void pnic2_lnk_change(struct net_device *dev, int csr5)
282 return; 283 return;
283 284
284 } else { 285 } else {
285 printk(KERN_INFO "%s: Autonegotiation failed, " 286 dev_info(&dev->dev,
286 "using %s, link beat status %4.4x.\n", 287 "Autonegotiation failed, using %s, link beat status %04x\n",
287 dev->name, medianame[dev->if_port], csr12); 288 medianame[dev->if_port], csr12);
288 289
289 /* remember to turn off bit 7 - autonegotiate 290 /* remember to turn off bit 7 - autonegotiate
290 * enable so we don't forget 291 * enable so we don't forget
@@ -339,9 +340,9 @@ void pnic2_lnk_change(struct net_device *dev, int csr5)
339 /* we are at 100mb and a potential link change occurred */ 340 /* we are at 100mb and a potential link change occurred */
340 341
341 if (tulip_debug > 1) 342 if (tulip_debug > 1)
342 printk(KERN_INFO"%s: PNIC2 %s link beat %s.\n", 343 dev_info(&dev->dev, "PNIC2 %s link beat %s\n",
343 dev->name, medianame[dev->if_port], 344 medianame[dev->if_port],
344 (csr12 & 2) ? "failed" : "good"); 345 (csr12 & 2) ? "failed" : "good");
345 346
346 /* check 100 link beat */ 347 /* check 100 link beat */
347 348
@@ -364,9 +365,9 @@ void pnic2_lnk_change(struct net_device *dev, int csr5)
364 /* we are at 10mb and a potential link change occurred */ 365 /* we are at 10mb and a potential link change occurred */
365 366
366 if (tulip_debug > 1) 367 if (tulip_debug > 1)
367 printk(KERN_INFO"%s: PNIC2 %s link beat %s.\n", 368 dev_info(&dev->dev, "PNIC2 %s link beat %s\n",
368 dev->name, medianame[dev->if_port], 369 medianame[dev->if_port],
369 (csr12 & 4) ? "failed" : "good"); 370 (csr12 & 4) ? "failed" : "good");
370 371
371 372
372 tp->nway = 0; 373 tp->nway = 0;
@@ -385,7 +386,7 @@ void pnic2_lnk_change(struct net_device *dev, int csr5)
385 386
386 387
387 if (tulip_debug > 1) 388 if (tulip_debug > 1)
388 printk(KERN_INFO"%s: PNIC2 Link Change Default?\n",dev->name); 389 dev_info(&dev->dev, "PNIC2 Link Change Default?\n");
389 390
390 /* if all else fails default to trying 10baseT-HD */ 391 /* if all else fails default to trying 10baseT-HD */
391 dev->if_port = 0; 392 dev->if_port = 0;
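
The pnic2.c hunks above all apply the same conversion: a raw printk() that hand-builds its prefix from dev->name becomes a dev_info()/dev_warn() call on the struct device embedded in the net_device, which typically prints a driver-and-bus-id prefix for free. A minimal before/after sketch of that pattern, using a made-up helper name (foo_report_status) rather than code from the patch itself:

    #include <linux/device.h>
    #include <linux/netdevice.h>

    /* Illustration only: foo_report_status() is a hypothetical helper,
     * not part of the tulip driver; csr12 stands for any register value. */
    static void foo_report_status(struct net_device *dev, u32 csr12)
    {
            /* old style: the message carries the interface name by hand */
            printk(KERN_INFO "%s: PNIC2 negotiation status %08x\n",
                   dev->name, csr12);

            /* new style: dev_info() derives the prefix from &dev->dev,
             * so dev->name drops out of the argument list */
            dev_info(&dev->dev, "PNIC2 negotiation status %08x\n", csr12);
    }

The same hunks also drop the trailing period from each message, a cosmetic cleanup that rides along with the conversion.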
diff --git a/drivers/net/tulip/timer.c b/drivers/net/tulip/timer.c
index a0e08422308..36c2725ec88 100644
--- a/drivers/net/tulip/timer.c
+++ b/drivers/net/tulip/timer.c
@@ -28,11 +28,11 @@ void tulip_media_task(struct work_struct *work)
28 unsigned long flags; 28 unsigned long flags;
29 29
30 if (tulip_debug > 2) { 30 if (tulip_debug > 2) {
31 printk(KERN_DEBUG "%s: Media selection tick, %s, status %8.8x mode" 31 printk(KERN_DEBUG "%s: Media selection tick, %s, status %08x mode %08x SIA %08x %08x %08x %08x\n",
32 " %8.8x SIA %8.8x %8.8x %8.8x %8.8x.\n", 32 dev->name, medianame[dev->if_port],
33 dev->name, medianame[dev->if_port], ioread32(ioaddr + CSR5), 33 ioread32(ioaddr + CSR5), ioread32(ioaddr + CSR6),
34 ioread32(ioaddr + CSR6), csr12, ioread32(ioaddr + CSR13), 34 csr12, ioread32(ioaddr + CSR13),
35 ioread32(ioaddr + CSR14), ioread32(ioaddr + CSR15)); 35 ioread32(ioaddr + CSR14), ioread32(ioaddr + CSR15));
36 } 36 }
37 switch (tp->chip_id) { 37 switch (tp->chip_id) {
38 case DC21140: 38 case DC21140:
@@ -48,9 +48,9 @@ void tulip_media_task(struct work_struct *work)
 48 Assume this is a generic MII or SYM transceiver. */ 48 Assume this is a generic MII or SYM transceiver. */
49 next_tick = 60*HZ; 49 next_tick = 60*HZ;
50 if (tulip_debug > 2) 50 if (tulip_debug > 2)
51 printk(KERN_DEBUG "%s: network media monitor CSR6 %8.8x " 51 printk(KERN_DEBUG "%s: network media monitor CSR6 %08x CSR12 0x%02x\n",
52 "CSR12 0x%2.2x.\n", 52 dev->name,
53 dev->name, ioread32(ioaddr + CSR6), csr12 & 0xff); 53 ioread32(ioaddr + CSR6), csr12 & 0xff);
54 break; 54 break;
55 } 55 }
56 mleaf = &tp->mtable->mleaf[tp->cur_index]; 56 mleaf = &tp->mtable->mleaf[tp->cur_index];
@@ -62,9 +62,8 @@ void tulip_media_task(struct work_struct *work)
62 s8 bitnum = p[offset]; 62 s8 bitnum = p[offset];
63 if (p[offset+1] & 0x80) { 63 if (p[offset+1] & 0x80) {
64 if (tulip_debug > 1) 64 if (tulip_debug > 1)
65 printk(KERN_DEBUG"%s: Transceiver monitor tick " 65 printk(KERN_DEBUG "%s: Transceiver monitor tick CSR12=%#02x, no media sense\n",
66 "CSR12=%#2.2x, no media sense.\n", 66 dev->name, csr12);
67 dev->name, csr12);
68 if (mleaf->type == 4) { 67 if (mleaf->type == 4) {
69 if (mleaf->media == 3 && (csr12 & 0x02)) 68 if (mleaf->media == 3 && (csr12 & 0x02))
70 goto select_next_media; 69 goto select_next_media;
@@ -72,16 +71,16 @@ void tulip_media_task(struct work_struct *work)
72 break; 71 break;
73 } 72 }
74 if (tulip_debug > 2) 73 if (tulip_debug > 2)
75 printk(KERN_DEBUG "%s: Transceiver monitor tick: CSR12=%#2.2x" 74 printk(KERN_DEBUG "%s: Transceiver monitor tick: CSR12=%#02x bit %d is %d, expecting %d\n",
76 " bit %d is %d, expecting %d.\n", 75 dev->name, csr12, (bitnum >> 1) & 7,
77 dev->name, csr12, (bitnum >> 1) & 7, 76 (csr12 & (1 << ((bitnum >> 1) & 7))) != 0,
78 (csr12 & (1 << ((bitnum >> 1) & 7))) != 0, 77 (bitnum >= 0));
79 (bitnum >= 0));
80 /* Check that the specified bit has the proper value. */ 78 /* Check that the specified bit has the proper value. */
81 if ((bitnum < 0) != 79 if ((bitnum < 0) !=
82 ((csr12 & (1 << ((bitnum >> 1) & 7))) != 0)) { 80 ((csr12 & (1 << ((bitnum >> 1) & 7))) != 0)) {
83 if (tulip_debug > 2) 81 if (tulip_debug > 2)
84 printk(KERN_DEBUG "%s: Link beat detected for %s.\n", dev->name, 82 printk(KERN_DEBUG "%s: Link beat detected for %s\n",
83 dev->name,
85 medianame[mleaf->media & MEDIA_MASK]); 84 medianame[mleaf->media & MEDIA_MASK]);
86 if ((p[2] & 0x61) == 0x01) /* Bogus Znyx board. */ 85 if ((p[2] & 0x61) == 0x01) /* Bogus Znyx board. */
87 goto actually_mii; 86 goto actually_mii;
@@ -100,9 +99,9 @@ void tulip_media_task(struct work_struct *work)
100 if (tulip_media_cap[dev->if_port] & MediaIsFD) 99 if (tulip_media_cap[dev->if_port] & MediaIsFD)
101 goto select_next_media; /* Skip FD entries. */ 100 goto select_next_media; /* Skip FD entries. */
102 if (tulip_debug > 1) 101 if (tulip_debug > 1)
103 printk(KERN_DEBUG "%s: No link beat on media %s," 102 printk(KERN_DEBUG "%s: No link beat on media %s, trying transceiver type %s\n",
104 " trying transceiver type %s.\n", 103 dev->name,
105 dev->name, medianame[mleaf->media & MEDIA_MASK], 104 medianame[mleaf->media & MEDIA_MASK],
106 medianame[tp->mtable->mleaf[tp->cur_index].media]); 105 medianame[tp->mtable->mleaf[tp->cur_index].media]);
107 tulip_select_media(dev, 0); 106 tulip_select_media(dev, 0);
108 /* Restart the transmit process. */ 107 /* Restart the transmit process. */
@@ -151,8 +150,8 @@ void mxic_timer(unsigned long data)
151 int next_tick = 60*HZ; 150 int next_tick = 60*HZ;
152 151
153 if (tulip_debug > 3) { 152 if (tulip_debug > 3) {
154 printk(KERN_INFO"%s: MXIC negotiation status %8.8x.\n", dev->name, 153 dev_info(&dev->dev, "MXIC negotiation status %08x\n",
155 ioread32(ioaddr + CSR12)); 154 ioread32(ioaddr + CSR12));
156 } 155 }
157 if (next_tick) { 156 if (next_tick) {
158 mod_timer(&tp->timer, RUN_AT(next_tick)); 157 mod_timer(&tp->timer, RUN_AT(next_tick));
@@ -167,11 +166,10 @@ void comet_timer(unsigned long data)
167 int next_tick = 60*HZ; 166 int next_tick = 60*HZ;
168 167
169 if (tulip_debug > 1) 168 if (tulip_debug > 1)
170 printk(KERN_DEBUG "%s: Comet link status %4.4x partner capability " 169 printk(KERN_DEBUG "%s: Comet link status %04x partner capability %04x\n",
171 "%4.4x.\n", 170 dev->name,
172 dev->name, 171 tulip_mdio_read(dev, tp->phys[0], 1),
173 tulip_mdio_read(dev, tp->phys[0], 1), 172 tulip_mdio_read(dev, tp->phys[0], 5));
174 tulip_mdio_read(dev, tp->phys[0], 5));
175 /* mod_timer synchronizes us with potential add_timer calls 173 /* mod_timer synchronizes us with potential add_timer calls
176 * from interrupts. 174 * from interrupts.
177 */ 175 */
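
Nearly every message in timer.c (and in the files below) also swaps the old "%8.8x" specifier for "%08x", and likewise "%4.4x" for "%04x" and "%2.2x" for "%02x". For an unsigned 32-bit value the two forms are interchangeable: a precision of 8 zero-pads the hex digits to eight characters just as width 8 with the 0 flag does. A quick userspace check (ordinary printf semantics, which match printk for these flags) illustrates the equivalence:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
            uint32_t v = 0x1a2b;

            printf("%8.8x\n", v);   /* prints 00001a2b */
            printf("%08x\n", v);    /* prints 00001a2b */
            return 0;
    }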
diff --git a/drivers/net/tulip/tulip_core.c b/drivers/net/tulip/tulip_core.c
index 20696b5d60a..e1a5f03a49c 100644
--- a/drivers/net/tulip/tulip_core.c
+++ b/drivers/net/tulip/tulip_core.c
@@ -41,7 +41,6 @@
41static char version[] __devinitdata = 41static char version[] __devinitdata =
42 "Linux Tulip driver version " DRV_VERSION " (" DRV_RELDATE ")\n"; 42 "Linux Tulip driver version " DRV_VERSION " (" DRV_RELDATE ")\n";
43 43
44
45/* A few user-configurable values. */ 44/* A few user-configurable values. */
46 45
47/* Maximum events (Rx packets, etc.) to handle at each interrupt. */ 46/* Maximum events (Rx packets, etc.) to handle at each interrupt. */
@@ -211,7 +210,7 @@ struct tulip_chip_table tulip_tbl[] = {
211}; 210};
212 211
213 212
214static struct pci_device_id tulip_pci_tbl[] = { 213static DEFINE_PCI_DEVICE_TABLE(tulip_pci_tbl) = {
215 { 0x1011, 0x0009, PCI_ANY_ID, PCI_ANY_ID, 0, 0, DC21140 }, 214 { 0x1011, 0x0009, PCI_ANY_ID, PCI_ANY_ID, 0, 0, DC21140 },
216 { 0x1011, 0x0019, PCI_ANY_ID, PCI_ANY_ID, 0, 0, DC21143 }, 215 { 0x1011, 0x0019, PCI_ANY_ID, PCI_ANY_ID, 0, 0, DC21143 },
217 { 0x11AD, 0x0002, PCI_ANY_ID, PCI_ANY_ID, 0, 0, LC82C168 }, 216 { 0x11AD, 0x0002, PCI_ANY_ID, PCI_ANY_ID, 0, 0, LC82C168 },
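
The device-table declarations here and in the drivers further down are switched from an open-coded struct pci_device_id array to the DEFINE_PCI_DEVICE_TABLE() macro. As a rough sketch (the exact attribute list depends on the kernel version; see <linux/pci.h>), the macro of this era expands along these lines, which is what makes the table const and places it in the __devinitconst section:

    /* Approximate expansion, for illustration only. */
    #define DEFINE_PCI_DEVICE_TABLE(_table) \
            const struct pci_device_id _table[] __devinitconst

    /* So the declaration in the hunk above is roughly equivalent to: */
    static const struct pci_device_id tulip_pci_tbl[] __devinitconst = {
            { 0x1011, 0x0009, PCI_ANY_ID, PCI_ANY_ID, 0, 0, DC21140 },
            /* ... remaining entries unchanged ... */
            { }     /* terminating all-zero entry */
    };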
@@ -326,7 +325,8 @@ static void tulip_up(struct net_device *dev)
326 udelay(100); 325 udelay(100);
327 326
328 if (tulip_debug > 1) 327 if (tulip_debug > 1)
329 printk(KERN_DEBUG "%s: tulip_up(), irq==%d.\n", dev->name, dev->irq); 328 printk(KERN_DEBUG "%s: tulip_up(), irq==%d\n",
329 dev->name, dev->irq);
330 330
331 iowrite32(tp->rx_ring_dma, ioaddr + CSR3); 331 iowrite32(tp->rx_ring_dma, ioaddr + CSR3);
332 iowrite32(tp->tx_ring_dma, ioaddr + CSR4); 332 iowrite32(tp->tx_ring_dma, ioaddr + CSR4);
@@ -387,8 +387,9 @@ static void tulip_up(struct net_device *dev)
387 (dev->if_port == 12 ? 0 : dev->if_port); 387 (dev->if_port == 12 ? 0 : dev->if_port);
388 for (i = 0; i < tp->mtable->leafcount; i++) 388 for (i = 0; i < tp->mtable->leafcount; i++)
389 if (tp->mtable->mleaf[i].media == looking_for) { 389 if (tp->mtable->mleaf[i].media == looking_for) {
390 printk(KERN_INFO "%s: Using user-specified media %s.\n", 390 dev_info(&dev->dev,
391 dev->name, medianame[dev->if_port]); 391 "Using user-specified media %s\n",
392 medianame[dev->if_port]);
392 goto media_picked; 393 goto media_picked;
393 } 394 }
394 } 395 }
@@ -396,8 +397,9 @@ static void tulip_up(struct net_device *dev)
396 int looking_for = tp->mtable->defaultmedia & MEDIA_MASK; 397 int looking_for = tp->mtable->defaultmedia & MEDIA_MASK;
397 for (i = 0; i < tp->mtable->leafcount; i++) 398 for (i = 0; i < tp->mtable->leafcount; i++)
398 if (tp->mtable->mleaf[i].media == looking_for) { 399 if (tp->mtable->mleaf[i].media == looking_for) {
399 printk(KERN_INFO "%s: Using EEPROM-set media %s.\n", 400 dev_info(&dev->dev,
400 dev->name, medianame[looking_for]); 401 "Using EEPROM-set media %s\n",
402 medianame[looking_for]);
401 goto media_picked; 403 goto media_picked;
402 } 404 }
403 } 405 }
@@ -424,9 +426,10 @@ media_picked:
424 if (tp->mii_cnt) { 426 if (tp->mii_cnt) {
425 tulip_select_media(dev, 1); 427 tulip_select_media(dev, 1);
426 if (tulip_debug > 1) 428 if (tulip_debug > 1)
427 printk(KERN_INFO "%s: Using MII transceiver %d, status " 429 dev_info(&dev->dev,
428 "%4.4x.\n", 430 "Using MII transceiver %d, status %04x\n",
429 dev->name, tp->phys[0], tulip_mdio_read(dev, tp->phys[0], 1)); 431 tp->phys[0],
432 tulip_mdio_read(dev, tp->phys[0], 1));
430 iowrite32(csr6_mask_defstate, ioaddr + CSR6); 433 iowrite32(csr6_mask_defstate, ioaddr + CSR6);
431 tp->csr6 = csr6_mask_hdcap; 434 tp->csr6 = csr6_mask_hdcap;
432 dev->if_port = 11; 435 dev->if_port = 11;
@@ -490,9 +493,10 @@ media_picked:
490 iowrite32(0, ioaddr + CSR2); /* Rx poll demand */ 493 iowrite32(0, ioaddr + CSR2); /* Rx poll demand */
491 494
492 if (tulip_debug > 2) { 495 if (tulip_debug > 2) {
493 printk(KERN_DEBUG "%s: Done tulip_up(), CSR0 %8.8x, CSR5 %8.8x CSR6 %8.8x.\n", 496 printk(KERN_DEBUG "%s: Done tulip_up(), CSR0 %08x, CSR5 %08x CSR6 %08x\n",
494 dev->name, ioread32(ioaddr + CSR0), ioread32(ioaddr + CSR5), 497 dev->name, ioread32(ioaddr + CSR0),
495 ioread32(ioaddr + CSR6)); 498 ioread32(ioaddr + CSR5),
499 ioread32(ioaddr + CSR6));
496 } 500 }
497 501
498 /* Set the timer to switch to check for link beat and perhaps switch 502 /* Set the timer to switch to check for link beat and perhaps switch
@@ -540,27 +544,30 @@ static void tulip_tx_timeout(struct net_device *dev)
540 if (tulip_media_cap[dev->if_port] & MediaIsMII) { 544 if (tulip_media_cap[dev->if_port] & MediaIsMII) {
541 /* Do nothing -- the media monitor should handle this. */ 545 /* Do nothing -- the media monitor should handle this. */
542 if (tulip_debug > 1) 546 if (tulip_debug > 1)
543 printk(KERN_WARNING "%s: Transmit timeout using MII device.\n", 547 dev_warn(&dev->dev,
544 dev->name); 548 "Transmit timeout using MII device\n");
545 } else if (tp->chip_id == DC21140 || tp->chip_id == DC21142 || 549 } else if (tp->chip_id == DC21140 || tp->chip_id == DC21142 ||
546 tp->chip_id == MX98713 || tp->chip_id == COMPEX9881 || 550 tp->chip_id == MX98713 || tp->chip_id == COMPEX9881 ||
547 tp->chip_id == DM910X) { 551 tp->chip_id == DM910X) {
548 printk(KERN_WARNING "%s: 21140 transmit timed out, status %8.8x, " 552 dev_warn(&dev->dev,
549 "SIA %8.8x %8.8x %8.8x %8.8x, resetting...\n", 553 "21140 transmit timed out, status %08x, SIA %08x %08x %08x %08x, resetting...\n",
550 dev->name, ioread32(ioaddr + CSR5), ioread32(ioaddr + CSR12), 554 ioread32(ioaddr + CSR5), ioread32(ioaddr + CSR12),
551 ioread32(ioaddr + CSR13), ioread32(ioaddr + CSR14), ioread32(ioaddr + CSR15)); 555 ioread32(ioaddr + CSR13), ioread32(ioaddr + CSR14),
556 ioread32(ioaddr + CSR15));
552 tp->timeout_recovery = 1; 557 tp->timeout_recovery = 1;
553 schedule_work(&tp->media_work); 558 schedule_work(&tp->media_work);
554 goto out_unlock; 559 goto out_unlock;
555 } else if (tp->chip_id == PNIC2) { 560 } else if (tp->chip_id == PNIC2) {
556 printk(KERN_WARNING "%s: PNIC2 transmit timed out, status %8.8x, " 561 dev_warn(&dev->dev,
557 "CSR6/7 %8.8x / %8.8x CSR12 %8.8x, resetting...\n", 562 "PNIC2 transmit timed out, status %08x, CSR6/7 %08x / %08x CSR12 %08x, resetting...\n",
558 dev->name, (int)ioread32(ioaddr + CSR5), (int)ioread32(ioaddr + CSR6), 563 (int)ioread32(ioaddr + CSR5),
559 (int)ioread32(ioaddr + CSR7), (int)ioread32(ioaddr + CSR12)); 564 (int)ioread32(ioaddr + CSR6),
565 (int)ioread32(ioaddr + CSR7),
566 (int)ioread32(ioaddr + CSR12));
560 } else { 567 } else {
561 printk(KERN_WARNING "%s: Transmit timed out, status %8.8x, CSR12 " 568 dev_warn(&dev->dev,
562 "%8.8x, resetting...\n", 569 "Transmit timed out, status %08x, CSR12 %08x, resetting...\n",
563 dev->name, ioread32(ioaddr + CSR5), ioread32(ioaddr + CSR12)); 570 ioread32(ioaddr + CSR5), ioread32(ioaddr + CSR12));
564 dev->if_port = 0; 571 dev->if_port = 0;
565 } 572 }
566 573
@@ -570,26 +577,26 @@ static void tulip_tx_timeout(struct net_device *dev)
570 for (i = 0; i < RX_RING_SIZE; i++) { 577 for (i = 0; i < RX_RING_SIZE; i++) {
571 u8 *buf = (u8 *)(tp->rx_ring[i].buffer1); 578 u8 *buf = (u8 *)(tp->rx_ring[i].buffer1);
572 int j; 579 int j;
573 printk(KERN_DEBUG "%2d: %8.8x %8.8x %8.8x %8.8x " 580 printk(KERN_DEBUG
574 "%2.2x %2.2x %2.2x.\n", 581 "%2d: %08x %08x %08x %08x %02x %02x %02x\n",
575 i, (unsigned int)tp->rx_ring[i].status, 582 i,
576 (unsigned int)tp->rx_ring[i].length, 583 (unsigned int)tp->rx_ring[i].status,
577 (unsigned int)tp->rx_ring[i].buffer1, 584 (unsigned int)tp->rx_ring[i].length,
578 (unsigned int)tp->rx_ring[i].buffer2, 585 (unsigned int)tp->rx_ring[i].buffer1,
579 buf[0], buf[1], buf[2]); 586 (unsigned int)tp->rx_ring[i].buffer2,
587 buf[0], buf[1], buf[2]);
580 for (j = 0; buf[j] != 0xee && j < 1600; j++) 588 for (j = 0; buf[j] != 0xee && j < 1600; j++)
581 if (j < 100) 589 if (j < 100)
582 printk(KERN_CONT " %2.2x", buf[j]); 590 pr_cont(" %02x", buf[j]);
583 printk(KERN_CONT " j=%d.\n", j); 591 pr_cont(" j=%d\n", j);
584 } 592 }
585 printk(KERN_DEBUG " Rx ring %8.8x: ", (int)tp->rx_ring); 593 printk(KERN_DEBUG " Rx ring %08x: ", (int)tp->rx_ring);
586 for (i = 0; i < RX_RING_SIZE; i++) 594 for (i = 0; i < RX_RING_SIZE; i++)
587 printk(KERN_CONT " %8.8x", 595 pr_cont(" %08x", (unsigned int)tp->rx_ring[i].status);
588 (unsigned int)tp->rx_ring[i].status); 596 printk(KERN_DEBUG " Tx ring %08x: ", (int)tp->tx_ring);
589 printk(KERN_DEBUG " Tx ring %8.8x: ", (int)tp->tx_ring);
590 for (i = 0; i < TX_RING_SIZE; i++) 597 for (i = 0; i < TX_RING_SIZE; i++)
591 printk(KERN_CONT " %8.8x", (unsigned int)tp->tx_ring[i].status); 598 pr_cont(" %08x", (unsigned int)tp->tx_ring[i].status);
592 printk(KERN_CONT "\n"); 599 pr_cont("\n");
593 } 600 }
594#endif 601#endif
595 602
@@ -832,8 +839,9 @@ static int tulip_close (struct net_device *dev)
832 tulip_down (dev); 839 tulip_down (dev);
833 840
834 if (tulip_debug > 1) 841 if (tulip_debug > 1)
835 printk (KERN_DEBUG "%s: Shutting down ethercard, status was %2.2x.\n", 842 dev_printk(KERN_DEBUG, &dev->dev,
836 dev->name, ioread32 (ioaddr + CSR5)); 843 "Shutting down ethercard, status was %02x\n",
844 ioread32 (ioaddr + CSR5));
837 845
838 free_irq (dev->irq, dev); 846 free_irq (dev->irq, dev);
839 847
@@ -1073,10 +1081,10 @@ static void set_rx_mode(struct net_device *dev)
1073 filterbit &= 0x3f; 1081 filterbit &= 0x3f;
1074 mc_filter[filterbit >> 5] |= 1 << (filterbit & 31); 1082 mc_filter[filterbit >> 5] |= 1 << (filterbit & 31);
1075 if (tulip_debug > 2) 1083 if (tulip_debug > 2)
1076 printk(KERN_INFO "%s: Added filter for %pM" 1084 dev_info(&dev->dev,
1077 " %8.8x bit %d.\n", 1085 "Added filter for %pM %08x bit %d\n",
1078 dev->name, mclist->dmi_addr, 1086 mclist->dmi_addr,
1079 ether_crc(ETH_ALEN, mclist->dmi_addr), filterbit); 1087 ether_crc(ETH_ALEN, mclist->dmi_addr), filterbit);
1080 } 1088 }
1081 if (mc_filter[0] == tp->mc_filter[0] && 1089 if (mc_filter[0] == tp->mc_filter[0] &&
1082 mc_filter[1] == tp->mc_filter[1]) 1090 mc_filter[1] == tp->mc_filter[1])
@@ -1288,9 +1296,8 @@ static int __devinit tulip_init_one (struct pci_dev *pdev,
1288 unsigned int force_csr0 = 0; 1296 unsigned int force_csr0 = 0;
1289 1297
1290#ifndef MODULE 1298#ifndef MODULE
1291 static int did_version; /* Already printed version info. */ 1299 if (tulip_debug > 0)
1292 if (tulip_debug > 0 && did_version++ == 0) 1300 printk_once(KERN_INFO "%s", version);
1293 printk (KERN_INFO "%s", version);
1294#endif 1301#endif
1295 1302
1296 board_idx++; 1303 board_idx++;
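
The hunk above replaces the hand-rolled did_version counter with printk_once(), which keeps its own static guard and prints its message at most once per boot no matter how many boards are probed. A minimal sketch of the idiom (the banner text and probe routine are placeholders, not the driver's actual code):

    #include <linux/kernel.h>

    static int example_debug = 1;           /* stands in for tulip_debug */
    static const char version[] = "example driver banner\n";

    static void example_probe(void)         /* hypothetical probe routine */
    {
            /* printk_once() carries its own static "printed" flag, so this
             * fires on the first probe only, even with the guard present. */
            if (example_debug > 0)
                    printk_once(KERN_INFO "%s", version);
    }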
@@ -1301,7 +1308,7 @@ static int __devinit tulip_init_one (struct pci_dev *pdev,
1301 */ 1308 */
1302 1309
1303 if (pdev->subsystem_vendor == PCI_VENDOR_ID_LMC) { 1310 if (pdev->subsystem_vendor == PCI_VENDOR_ID_LMC) {
1304 printk (KERN_ERR PFX "skipping LMC card.\n"); 1311 pr_err(PFX "skipping LMC card\n");
1305 return -ENODEV; 1312 return -ENODEV;
1306 } 1313 }
1307 1314
@@ -1317,15 +1324,13 @@ static int __devinit tulip_init_one (struct pci_dev *pdev,
1317 1324
1318 if (pdev->vendor == 0x1282 && pdev->device == 0x9100 && 1325 if (pdev->vendor == 0x1282 && pdev->device == 0x9100 &&
1319 pdev->revision < 0x30) { 1326 pdev->revision < 0x30) {
1320 printk(KERN_INFO PFX 1327 pr_info(PFX "skipping early DM9100 with Crc bug (use dmfe)\n");
1321 "skipping early DM9100 with Crc bug (use dmfe)\n");
1322 return -ENODEV; 1328 return -ENODEV;
1323 } 1329 }
1324 1330
1325 dp = pci_device_to_OF_node(pdev); 1331 dp = pci_device_to_OF_node(pdev);
1326 if (!(dp && of_get_property(dp, "local-mac-address", NULL))) { 1332 if (!(dp && of_get_property(dp, "local-mac-address", NULL))) {
1327 printk(KERN_INFO PFX 1333 pr_info(PFX "skipping DM910x expansion card (use dmfe)\n");
1328 "skipping DM910x expansion card (use dmfe)\n");
1329 return -ENODEV; 1334 return -ENODEV;
1330 } 1335 }
1331 } 1336 }
@@ -1372,9 +1377,8 @@ static int __devinit tulip_init_one (struct pci_dev *pdev,
1372 1377
1373 i = pci_enable_device(pdev); 1378 i = pci_enable_device(pdev);
1374 if (i) { 1379 if (i) {
1375 printk (KERN_ERR PFX 1380 pr_err(PFX "Cannot enable tulip board #%d, aborting\n",
1376 "Cannot enable tulip board #%d, aborting\n", 1381 board_idx);
1377 board_idx);
1378 return i; 1382 return i;
1379 } 1383 }
1380 1384
@@ -1383,22 +1387,22 @@ static int __devinit tulip_init_one (struct pci_dev *pdev,
1383 /* alloc_etherdev ensures aligned and zeroed private structures */ 1387 /* alloc_etherdev ensures aligned and zeroed private structures */
1384 dev = alloc_etherdev (sizeof (*tp)); 1388 dev = alloc_etherdev (sizeof (*tp));
1385 if (!dev) { 1389 if (!dev) {
1386 printk (KERN_ERR PFX "ether device alloc failed, aborting\n"); 1390 pr_err(PFX "ether device alloc failed, aborting\n");
1387 return -ENOMEM; 1391 return -ENOMEM;
1388 } 1392 }
1389 1393
1390 SET_NETDEV_DEV(dev, &pdev->dev); 1394 SET_NETDEV_DEV(dev, &pdev->dev);
1391 if (pci_resource_len (pdev, 0) < tulip_tbl[chip_idx].io_size) { 1395 if (pci_resource_len (pdev, 0) < tulip_tbl[chip_idx].io_size) {
1392 printk (KERN_ERR PFX "%s: I/O region (0x%llx@0x%llx) too small, " 1396 pr_err(PFX "%s: I/O region (0x%llx@0x%llx) too small, aborting\n",
1393 "aborting\n", pci_name(pdev), 1397 pci_name(pdev),
1394 (unsigned long long)pci_resource_len (pdev, 0), 1398 (unsigned long long)pci_resource_len (pdev, 0),
1395 (unsigned long long)pci_resource_start (pdev, 0)); 1399 (unsigned long long)pci_resource_start (pdev, 0));
1396 goto err_out_free_netdev; 1400 goto err_out_free_netdev;
1397 } 1401 }
1398 1402
1399 /* grab all resources from both PIO and MMIO regions, as we 1403 /* grab all resources from both PIO and MMIO regions, as we
1400 * don't want anyone else messing around with our hardware */ 1404 * don't want anyone else messing around with our hardware */
1401 if (pci_request_regions (pdev, "tulip")) 1405 if (pci_request_regions (pdev, DRV_NAME))
1402 goto err_out_free_netdev; 1406 goto err_out_free_netdev;
1403 1407
1404 ioaddr = pci_iomap(pdev, TULIP_BAR, tulip_tbl[chip_idx].io_size); 1408 ioaddr = pci_iomap(pdev, TULIP_BAR, tulip_tbl[chip_idx].io_size);
@@ -1611,8 +1615,8 @@ static int __devinit tulip_init_one (struct pci_dev *pdev,
1611 if (dev->mem_start & MEDIA_MASK) 1615 if (dev->mem_start & MEDIA_MASK)
1612 tp->default_port = dev->mem_start & MEDIA_MASK; 1616 tp->default_port = dev->mem_start & MEDIA_MASK;
1613 if (tp->default_port) { 1617 if (tp->default_port) {
1614 printk(KERN_INFO "tulip%d: Transceiver selection forced to %s.\n", 1618 pr_info(DRV_NAME "%d: Transceiver selection forced to %s\n",
1615 board_idx, medianame[tp->default_port & MEDIA_MASK]); 1619 board_idx, medianame[tp->default_port & MEDIA_MASK]);
1616 tp->medialock = 1; 1620 tp->medialock = 1;
1617 if (tulip_media_cap[tp->default_port] & MediaAlwaysFD) 1621 if (tulip_media_cap[tp->default_port] & MediaAlwaysFD)
1618 tp->full_duplex = 1; 1622 tp->full_duplex = 1;
@@ -1627,7 +1631,7 @@ static int __devinit tulip_init_one (struct pci_dev *pdev,
1627 } 1631 }
1628 1632
1629 if (tp->flags & HAS_MEDIA_TABLE) { 1633 if (tp->flags & HAS_MEDIA_TABLE) {
1630 sprintf(dev->name, "tulip%d", board_idx); /* hack */ 1634 sprintf(dev->name, DRV_NAME "%d", board_idx); /* hack */
1631 tulip_parse_eeprom(dev); 1635 tulip_parse_eeprom(dev);
1632 strcpy(dev->name, "eth%d"); /* un-hack */ 1636 strcpy(dev->name, "eth%d"); /* un-hack */
1633 } 1637 }
@@ -1663,20 +1667,18 @@ static int __devinit tulip_init_one (struct pci_dev *pdev,
1663 if (register_netdev(dev)) 1667 if (register_netdev(dev))
1664 goto err_out_free_ring; 1668 goto err_out_free_ring;
1665 1669
1666 printk(KERN_INFO "%s: %s rev %d at " 1670 pci_set_drvdata(pdev, dev);
1671
1672 dev_info(&dev->dev,
1667#ifdef CONFIG_TULIP_MMIO 1673#ifdef CONFIG_TULIP_MMIO
1668 "MMIO" 1674 "%s rev %d at MMIO %#llx,%s %pM, IRQ %d\n",
1669#else 1675#else
1670 "Port" 1676 "%s rev %d at Port %#llx,%s %pM, IRQ %d\n",
1671#endif 1677#endif
1672 " %#llx,", dev->name, chip_name, pdev->revision, 1678 chip_name, pdev->revision,
1673 (unsigned long long) pci_resource_start(pdev, TULIP_BAR)); 1679 (unsigned long long)pci_resource_start(pdev, TULIP_BAR),
1674 pci_set_drvdata(pdev, dev); 1680 eeprom_missing ? " EEPROM not present," : "",
1675 1681 dev->dev_addr, irq);
1676 if (eeprom_missing)
1677 printk(" EEPROM not present,");
1678 printk(" %pM", dev->dev_addr);
1679 printk(", IRQ %d.\n", irq);
1680 1682
1681 if (tp->chip_id == PNIC2) 1683 if (tp->chip_id == PNIC2)
1682 tp->link_change = pnic2_lnk_change; 1684 tp->link_change = pnic2_lnk_change;
@@ -1799,12 +1801,12 @@ static int tulip_resume(struct pci_dev *pdev)
1799 return 0; 1801 return 0;
1800 1802
1801 if ((retval = pci_enable_device(pdev))) { 1803 if ((retval = pci_enable_device(pdev))) {
1802 printk (KERN_ERR "tulip: pci_enable_device failed in resume\n"); 1804 pr_err(PFX "pci_enable_device failed in resume\n");
1803 return retval; 1805 return retval;
1804 } 1806 }
1805 1807
1806 if ((retval = request_irq(dev->irq, tulip_interrupt, IRQF_SHARED, dev->name, dev))) { 1808 if ((retval = request_irq(dev->irq, tulip_interrupt, IRQF_SHARED, dev->name, dev))) {
1807 printk (KERN_ERR "tulip: request_irq failed in resume\n"); 1809 pr_err(PFX "request_irq failed in resume\n");
1808 return retval; 1810 return retval;
1809 } 1811 }
1810 1812
@@ -1874,7 +1876,7 @@ static struct pci_driver tulip_driver = {
1874static int __init tulip_init (void) 1876static int __init tulip_init (void)
1875{ 1877{
1876#ifdef MODULE 1878#ifdef MODULE
1877 printk (KERN_INFO "%s", version); 1879 pr_info("%s", version);
1878#endif 1880#endif
1879 1881
1880 /* copy module parms into globals */ 1882 /* copy module parms into globals */
diff --git a/drivers/net/tulip/uli526x.c b/drivers/net/tulip/uli526x.c
index fa019cabc35..dc3335d906f 100644
--- a/drivers/net/tulip/uli526x.c
+++ b/drivers/net/tulip/uli526x.c
@@ -12,6 +12,8 @@
12 12
13*/ 13*/
14 14
15#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
16
15#define DRV_NAME "uli526x" 17#define DRV_NAME "uli526x"
16#define DRV_VERSION "0.9.3" 18#define DRV_VERSION "0.9.3"
17#define DRV_RELDATE "2005-7-29" 19#define DRV_RELDATE "2005-7-29"
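
The new pr_fmt() define has to appear before any #include so that it is already visible when the kernel headers define the pr_*() helpers, which expand roughly to printk(KERN_ERR pr_fmt(fmt), ...). With KBUILD_MODNAME prepended this way, every pr_err()/pr_info() in the file gains a "uli526x: " prefix automatically, which is why the explicit DRV_NAME strings disappear from the individual messages in the hunks that follow. A small sketch of the effect (the helper function is made up for illustration):

    #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

    #include <linux/kernel.h>
    #include <linux/skbuff.h>

    static void example_check_len(struct sk_buff *skb)     /* hypothetical */
    {
            /* Logs "uli526x: big packet = <len>" when built as uli526x.ko;
             * the prefix comes from pr_fmt(), not from the format string. */
            pr_err("big packet = %u\n", skb->len);
    }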
@@ -82,9 +84,16 @@
82#define ULI526X_TX_TIMEOUT ((16*HZ)/2) /* tx packet time-out time 8 s" */ 84#define ULI526X_TX_TIMEOUT ((16*HZ)/2) /* tx packet time-out time 8 s" */
83#define ULI526X_TX_KICK (4*HZ/2) /* tx packet Kick-out time 2 s" */ 85#define ULI526X_TX_KICK (4*HZ/2) /* tx packet Kick-out time 2 s" */
84 86
85#define ULI526X_DBUG(dbug_now, msg, value) if (uli526x_debug || (dbug_now)) printk(KERN_ERR DRV_NAME ": %s %lx\n", (msg), (long) (value)) 87#define ULI526X_DBUG(dbug_now, msg, value) \
88do { \
89 if (uli526x_debug || (dbug_now)) \
90 pr_err("%s %lx\n", (msg), (long) (value)); \
91} while (0)
86 92
87#define SHOW_MEDIA_TYPE(mode) printk(KERN_ERR DRV_NAME ": Change Speed to %sMhz %s duplex\n",mode & 1 ?"100":"10", mode & 4 ? "full":"half"); 93#define SHOW_MEDIA_TYPE(mode) \
94 pr_err("Change Speed to %sMhz %s duplex\n", \
95 mode & 1 ? "100" : "10", \
96 mode & 4 ? "full" : "half");
88 97
89 98
90/* CR9 definition: SROM/MII */ 99/* CR9 definition: SROM/MII */
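
ULI526X_DBUG() is also rewrapped in do { ... } while (0), the usual way to make a conditional or multi-statement macro behave as exactly one statement so a caller's if/else nesting is preserved. A sketch of the difference, with hypothetical macro and variable names rather than the driver's own:

    #include <linux/kernel.h>

    static int example_debug;       /* stands in for uli526x_debug */

    /* Unsafe form: the macro's own "if" can capture a caller's "else". */
    #define DBG_BAD(msg)    if (example_debug) pr_err("%s\n", msg)

    /* Safe form, as the patch adopts: always exactly one statement. */
    #define DBG_GOOD(msg)                           \
    do {                                            \
            if (example_debug)                      \
                    pr_err("%s\n", msg);            \
    } while (0)

With DBG_BAD, a call site such as "if (err) DBG_BAD("oops"); else recover();" pairs the else with the macro's hidden if instead of with if (err), silently changing the control flow; the do/while form keeps the nesting the caller wrote. (SHOW_MEDIA_TYPE in the same hunk still ends in a semicolon, so it remains a statement-like macro.)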
@@ -284,7 +293,7 @@ static int __devinit uli526x_init_one (struct pci_dev *pdev,
284 SET_NETDEV_DEV(dev, &pdev->dev); 293 SET_NETDEV_DEV(dev, &pdev->dev);
285 294
286 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) { 295 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) {
287 printk(KERN_WARNING DRV_NAME ": 32-bit PCI DMA not available.\n"); 296 pr_warning("32-bit PCI DMA not available\n");
288 err = -ENODEV; 297 err = -ENODEV;
289 goto err_out_free; 298 goto err_out_free;
290 } 299 }
@@ -295,19 +304,19 @@ static int __devinit uli526x_init_one (struct pci_dev *pdev,
295 goto err_out_free; 304 goto err_out_free;
296 305
297 if (!pci_resource_start(pdev, 0)) { 306 if (!pci_resource_start(pdev, 0)) {
298 printk(KERN_ERR DRV_NAME ": I/O base is zero\n"); 307 pr_err("I/O base is zero\n");
299 err = -ENODEV; 308 err = -ENODEV;
300 goto err_out_disable; 309 goto err_out_disable;
301 } 310 }
302 311
303 if (pci_resource_len(pdev, 0) < (ULI526X_IO_SIZE) ) { 312 if (pci_resource_len(pdev, 0) < (ULI526X_IO_SIZE) ) {
304 printk(KERN_ERR DRV_NAME ": Allocated I/O size too small\n"); 313 pr_err("Allocated I/O size too small\n");
305 err = -ENODEV; 314 err = -ENODEV;
306 goto err_out_disable; 315 goto err_out_disable;
307 } 316 }
308 317
309 if (pci_request_regions(pdev, DRV_NAME)) { 318 if (pci_request_regions(pdev, DRV_NAME)) {
310 printk(KERN_ERR DRV_NAME ": Failed to request PCI regions\n"); 319 pr_err("Failed to request PCI regions\n");
311 err = -ENODEV; 320 err = -ENODEV;
312 goto err_out_disable; 321 goto err_out_disable;
313 } 322 }
@@ -382,9 +391,9 @@ static int __devinit uli526x_init_one (struct pci_dev *pdev,
382 if (err) 391 if (err)
383 goto err_out_res; 392 goto err_out_res;
384 393
385 printk(KERN_INFO "%s: ULi M%04lx at pci%s, %pM, irq %d.\n", 394 dev_info(&dev->dev, "ULi M%04lx at pci%s, %pM, irq %d\n",
386 dev->name,ent->driver_data >> 16,pci_name(pdev), 395 ent->driver_data >> 16, pci_name(pdev),
387 dev->dev_addr, dev->irq); 396 dev->dev_addr, dev->irq);
388 397
389 pci_set_master(pdev); 398 pci_set_master(pdev);
390 399
@@ -516,7 +525,7 @@ static void uli526x_init(struct net_device *dev)
516 } 525 }
517 } 526 }
518 if(phy_tmp == 32) 527 if(phy_tmp == 32)
519 printk(KERN_WARNING "Can not find the phy address!!!"); 528 pr_warning("Can not find the phy address!!!");
520 /* Parser SROM and media mode */ 529 /* Parser SROM and media mode */
521 db->media_mode = uli526x_media_mode; 530 db->media_mode = uli526x_media_mode;
522 531
@@ -582,7 +591,7 @@ static netdev_tx_t uli526x_start_xmit(struct sk_buff *skb,
582 591
583 /* Too large packet check */ 592 /* Too large packet check */
584 if (skb->len > MAX_PACKET_SIZE) { 593 if (skb->len > MAX_PACKET_SIZE) {
585 printk(KERN_ERR DRV_NAME ": big packet = %d\n", (u16)skb->len); 594 pr_err("big packet = %d\n", (u16)skb->len);
586 dev_kfree_skb(skb); 595 dev_kfree_skb(skb);
587 return NETDEV_TX_OK; 596 return NETDEV_TX_OK;
588 } 597 }
@@ -592,7 +601,7 @@ static netdev_tx_t uli526x_start_xmit(struct sk_buff *skb,
 592 /* No Tx resource check, it never happens normally */ 601 /* No Tx resource check, it never happens normally */
593 if (db->tx_packet_cnt >= TX_FREE_DESC_CNT) { 602 if (db->tx_packet_cnt >= TX_FREE_DESC_CNT) {
594 spin_unlock_irqrestore(&db->lock, flags); 603 spin_unlock_irqrestore(&db->lock, flags);
595 printk(KERN_ERR DRV_NAME ": No Tx resource %ld\n", db->tx_packet_cnt); 604 pr_err("No Tx resource %ld\n", db->tx_packet_cnt);
596 return NETDEV_TX_BUSY; 605 return NETDEV_TX_BUSY;
597 } 606 }
598 607
@@ -1058,7 +1067,7 @@ static void uli526x_timer(unsigned long data)
1058 /* Link Failed */ 1067 /* Link Failed */
1059 ULI526X_DBUG(0, "Link Failed", tmp_cr12); 1068 ULI526X_DBUG(0, "Link Failed", tmp_cr12);
1060 netif_carrier_off(dev); 1069 netif_carrier_off(dev);
1061 printk(KERN_INFO "uli526x: %s NIC Link is Down\n",dev->name); 1070 pr_info("%s NIC Link is Down\n",dev->name);
1062 db->link_failed = 1; 1071 db->link_failed = 1;
1063 1072
1064 /* For Force 10/100M Half/Full mode: Enable Auto-Nego mode */ 1073 /* For Force 10/100M Half/Full mode: Enable Auto-Nego mode */
@@ -1090,11 +1099,11 @@ static void uli526x_timer(unsigned long data)
1090 } 1099 }
1091 if(db->op_mode==ULI526X_10MFD || db->op_mode==ULI526X_100MFD) 1100 if(db->op_mode==ULI526X_10MFD || db->op_mode==ULI526X_100MFD)
1092 { 1101 {
1093 printk(KERN_INFO "uli526x: %s NIC Link is Up %d Mbps Full duplex\n",dev->name,TmpSpeed); 1102 pr_info("%s NIC Link is Up %d Mbps Full duplex\n",dev->name,TmpSpeed);
1094 } 1103 }
1095 else 1104 else
1096 { 1105 {
1097 printk(KERN_INFO "uli526x: %s NIC Link is Up %d Mbps Half duplex\n",dev->name,TmpSpeed); 1106 pr_info("%s NIC Link is Up %d Mbps Half duplex\n",dev->name,TmpSpeed);
1098 } 1107 }
1099 netif_carrier_on(dev); 1108 netif_carrier_on(dev);
1100 } 1109 }
@@ -1104,7 +1113,7 @@ static void uli526x_timer(unsigned long data)
1104 { 1113 {
1105 if(db->init==1) 1114 if(db->init==1)
1106 { 1115 {
1107 printk(KERN_INFO "uli526x: %s NIC Link is Down\n",dev->name); 1116 pr_info("%s NIC Link is Down\n",dev->name);
1108 netif_carrier_off(dev); 1117 netif_carrier_off(dev);
1109 } 1118 }
1110 } 1119 }
@@ -1230,8 +1239,7 @@ static int uli526x_resume(struct pci_dev *pdev)
1230 1239
1231 err = pci_set_power_state(pdev, PCI_D0); 1240 err = pci_set_power_state(pdev, PCI_D0);
1232 if (err) { 1241 if (err) {
1233 printk(KERN_WARNING "%s: Could not put device into D0\n", 1242 dev_warn(&dev->dev, "Could not put device into D0\n");
1234 dev->name);
1235 return err; 1243 return err;
1236 } 1244 }
1237 1245
@@ -1432,7 +1440,7 @@ static void send_filter_frame(struct net_device *dev, int mc_cnt)
1432 update_cr6(db->cr6_data, dev->base_addr); 1440 update_cr6(db->cr6_data, dev->base_addr);
1433 dev->trans_start = jiffies; 1441 dev->trans_start = jiffies;
1434 } else 1442 } else
1435 printk(KERN_ERR DRV_NAME ": No Tx resource - Send_filter_frame!\n"); 1443 pr_err("No Tx resource - Send_filter_frame!\n");
1436} 1444}
1437 1445
1438 1446
@@ -1783,7 +1791,7 @@ static u16 phy_read_1bit(unsigned long ioaddr, u32 chip_id)
1783} 1791}
1784 1792
1785 1793
1786static struct pci_device_id uli526x_pci_tbl[] = { 1794static DEFINE_PCI_DEVICE_TABLE(uli526x_pci_tbl) = {
1787 { 0x10B9, 0x5261, PCI_ANY_ID, PCI_ANY_ID, 0, 0, PCI_ULI5261_ID }, 1795 { 0x10B9, 0x5261, PCI_ANY_ID, PCI_ANY_ID, 0, 0, PCI_ULI5261_ID },
1788 { 0x10B9, 0x5263, PCI_ANY_ID, PCI_ANY_ID, 0, 0, PCI_ULI5263_ID }, 1796 { 0x10B9, 0x5263, PCI_ANY_ID, PCI_ANY_ID, 0, 0, PCI_ULI5263_ID },
1789 { 0, } 1797 { 0, }
diff --git a/drivers/net/tulip/winbond-840.c b/drivers/net/tulip/winbond-840.c
index 869a7a0005f..9fb89afccf7 100644
--- a/drivers/net/tulip/winbond-840.c
+++ b/drivers/net/tulip/winbond-840.c
@@ -218,7 +218,7 @@ enum chip_capability_flags {
218 CanHaveMII=1, HasBrokenTx=2, AlwaysFDX=4, FDXOnNoMII=8, 218 CanHaveMII=1, HasBrokenTx=2, AlwaysFDX=4, FDXOnNoMII=8,
219}; 219};
220 220
221static const struct pci_device_id w840_pci_tbl[] = { 221static DEFINE_PCI_DEVICE_TABLE(w840_pci_tbl) = {
222 { 0x1050, 0x0840, PCI_ANY_ID, 0x8153, 0, 0, 0 }, 222 { 0x1050, 0x0840, PCI_ANY_ID, 0x8153, 0, 0, 0 },
223 { 0x1050, 0x0840, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1 }, 223 { 0x1050, 0x0840, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1 },
224 { 0x11f6, 0x2011, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 2 }, 224 { 0x11f6, 0x2011, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 2 },
@@ -376,8 +376,8 @@ static int __devinit w840_probe1 (struct pci_dev *pdev,
376 irq = pdev->irq; 376 irq = pdev->irq;
377 377
378 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) { 378 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) {
379 printk(KERN_WARNING "Winbond-840: Device %s disabled due to DMA limitations.\n", 379 pr_warning("Winbond-840: Device %s disabled due to DMA limitations\n",
380 pci_name(pdev)); 380 pci_name(pdev));
381 return -EIO; 381 return -EIO;
382 } 382 }
383 dev = alloc_etherdev(sizeof(*np)); 383 dev = alloc_etherdev(sizeof(*np));
@@ -422,8 +422,9 @@ static int __devinit w840_probe1 (struct pci_dev *pdev,
422 if (option & 0x200) 422 if (option & 0x200)
423 np->mii_if.full_duplex = 1; 423 np->mii_if.full_duplex = 1;
424 if (option & 15) 424 if (option & 15)
425 printk(KERN_INFO "%s: ignoring user supplied media type %d", 425 dev_info(&dev->dev,
426 dev->name, option & 15); 426 "ignoring user supplied media type %d",
427 option & 15);
427 } 428 }
428 if (find_cnt < MAX_UNITS && full_duplex[find_cnt] > 0) 429 if (find_cnt < MAX_UNITS && full_duplex[find_cnt] > 0)
429 np->mii_if.full_duplex = 1; 430 np->mii_if.full_duplex = 1;
@@ -440,9 +441,8 @@ static int __devinit w840_probe1 (struct pci_dev *pdev,
440 if (i) 441 if (i)
441 goto err_out_cleardev; 442 goto err_out_cleardev;
442 443
443 printk(KERN_INFO "%s: %s at %p, %pM, IRQ %d.\n", 444 dev_info(&dev->dev, "%s at %p, %pM, IRQ %d\n",
444 dev->name, pci_id_tbl[chip_idx].name, ioaddr, 445 pci_id_tbl[chip_idx].name, ioaddr, dev->dev_addr, irq);
445 dev->dev_addr, irq);
446 446
447 if (np->drv_flags & CanHaveMII) { 447 if (np->drv_flags & CanHaveMII) {
448 int phy, phy_idx = 0; 448 int phy, phy_idx = 0;
@@ -453,16 +453,17 @@ static int __devinit w840_probe1 (struct pci_dev *pdev,
453 np->mii_if.advertising = mdio_read(dev, phy, MII_ADVERTISE); 453 np->mii_if.advertising = mdio_read(dev, phy, MII_ADVERTISE);
454 np->mii = (mdio_read(dev, phy, MII_PHYSID1) << 16)+ 454 np->mii = (mdio_read(dev, phy, MII_PHYSID1) << 16)+
455 mdio_read(dev, phy, MII_PHYSID2); 455 mdio_read(dev, phy, MII_PHYSID2);
456 printk(KERN_INFO "%s: MII PHY %8.8xh found at address %d, status " 456 dev_info(&dev->dev,
457 "0x%4.4x advertising %4.4x.\n", 457 "MII PHY %08xh found at address %d, status 0x%04x advertising %04x\n",
458 dev->name, np->mii, phy, mii_status, np->mii_if.advertising); 458 np->mii, phy, mii_status,
459 np->mii_if.advertising);
459 } 460 }
460 } 461 }
461 np->mii_cnt = phy_idx; 462 np->mii_cnt = phy_idx;
462 np->mii_if.phy_id = np->phys[0]; 463 np->mii_if.phy_id = np->phys[0];
463 if (phy_idx == 0) { 464 if (phy_idx == 0) {
464 printk(KERN_WARNING "%s: MII PHY not found -- this device may " 465 dev_warn(&dev->dev,
465 "not operate correctly.\n", dev->name); 466 "MII PHY not found -- this device may not operate correctly\n");
466 } 467 }
467 } 468 }
468 469
@@ -644,8 +645,8 @@ static int netdev_open(struct net_device *dev)
644 goto out_err; 645 goto out_err;
645 646
646 if (debug > 1) 647 if (debug > 1)
647 printk(KERN_DEBUG "%s: w89c840_open() irq %d.\n", 648 printk(KERN_DEBUG "%s: w89c840_open() irq %d\n",
648 dev->name, dev->irq); 649 dev->name, dev->irq);
649 650
650 if((i=alloc_ringdesc(dev))) 651 if((i=alloc_ringdesc(dev)))
651 goto out_err; 652 goto out_err;
@@ -657,7 +658,7 @@ static int netdev_open(struct net_device *dev)
657 658
658 netif_start_queue(dev); 659 netif_start_queue(dev);
659 if (debug > 2) 660 if (debug > 2)
660 printk(KERN_DEBUG "%s: Done netdev_open().\n", dev->name); 661 printk(KERN_DEBUG "%s: Done netdev_open()\n", dev->name);
661 662
662 /* Set the timer to check for link beat. */ 663 /* Set the timer to check for link beat. */
663 init_timer(&np->timer); 664 init_timer(&np->timer);
@@ -688,16 +689,18 @@ static int update_link(struct net_device *dev)
688 if (!(mii_reg & 0x4)) { 689 if (!(mii_reg & 0x4)) {
689 if (netif_carrier_ok(dev)) { 690 if (netif_carrier_ok(dev)) {
690 if (debug) 691 if (debug)
691 printk(KERN_INFO "%s: MII #%d reports no link. Disabling watchdog.\n", 692 dev_info(&dev->dev,
692 dev->name, np->phys[0]); 693 "MII #%d reports no link. Disabling watchdog\n",
694 np->phys[0]);
693 netif_carrier_off(dev); 695 netif_carrier_off(dev);
694 } 696 }
695 return np->csr6; 697 return np->csr6;
696 } 698 }
697 if (!netif_carrier_ok(dev)) { 699 if (!netif_carrier_ok(dev)) {
698 if (debug) 700 if (debug)
699 printk(KERN_INFO "%s: MII #%d link is back. Enabling watchdog.\n", 701 dev_info(&dev->dev,
700 dev->name, np->phys[0]); 702 "MII #%d link is back. Enabling watchdog\n",
703 np->phys[0]);
701 netif_carrier_on(dev); 704 netif_carrier_on(dev);
702 } 705 }
703 706
@@ -729,9 +732,10 @@ static int update_link(struct net_device *dev)
729 if (fasteth) 732 if (fasteth)
730 result |= 0x20000000; 733 result |= 0x20000000;
731 if (result != np->csr6 && debug) 734 if (result != np->csr6 && debug)
732 printk(KERN_INFO "%s: Setting %dMBit-%s-duplex based on MII#%d\n", 735 dev_info(&dev->dev,
733 dev->name, fasteth ? 100 : 10, 736 "Setting %dMBit-%s-duplex based on MII#%d\n",
734 duplex ? "full" : "half", np->phys[0]); 737 fasteth ? 100 : 10, duplex ? "full" : "half",
738 np->phys[0]);
735 return result; 739 return result;
736} 740}
737 741
@@ -763,8 +767,8 @@ static inline void update_csr6(struct net_device *dev, int new)
763 767
764 limit--; 768 limit--;
765 if(!limit) { 769 if(!limit) {
766 printk(KERN_INFO "%s: couldn't stop rxtx, IntrStatus %xh.\n", 770 dev_info(&dev->dev,
767 dev->name, csr5); 771 "couldn't stop rxtx, IntrStatus %xh\n", csr5);
768 break; 772 break;
769 } 773 }
770 udelay(1); 774 udelay(1);
@@ -783,10 +787,9 @@ static void netdev_timer(unsigned long data)
783 void __iomem *ioaddr = np->base_addr; 787 void __iomem *ioaddr = np->base_addr;
784 788
785 if (debug > 2) 789 if (debug > 2)
786 printk(KERN_DEBUG "%s: Media selection timer tick, status %8.8x " 790 printk(KERN_DEBUG "%s: Media selection timer tick, status %08x config %08x\n",
787 "config %8.8x.\n", 791 dev->name, ioread32(ioaddr + IntrStatus),
788 dev->name, ioread32(ioaddr + IntrStatus), 792 ioread32(ioaddr + NetworkConfig));
789 ioread32(ioaddr + NetworkConfig));
790 spin_lock_irq(&np->lock); 793 spin_lock_irq(&np->lock);
791 update_csr6(dev, update_link(dev)); 794 update_csr6(dev, update_link(dev));
792 spin_unlock_irq(&np->lock); 795 spin_unlock_irq(&np->lock);
@@ -899,8 +902,8 @@ static void init_registers(struct net_device *dev)
899 /* When not a module we can work around broken '486 PCI boards. */ 902 /* When not a module we can work around broken '486 PCI boards. */
900 if (boot_cpu_data.x86 <= 4) { 903 if (boot_cpu_data.x86 <= 4) {
901 i |= 0x4800; 904 i |= 0x4800;
902 printk(KERN_INFO "%s: This is a 386/486 PCI system, setting cache " 905 dev_info(&dev->dev,
903 "alignment to 8 longwords.\n", dev->name); 906 "This is a 386/486 PCI system, setting cache alignment to 8 longwords\n");
904 } else { 907 } else {
905 i |= 0xE000; 908 i |= 0xE000;
906 } 909 }
@@ -931,22 +934,23 @@ static void tx_timeout(struct net_device *dev)
931 struct netdev_private *np = netdev_priv(dev); 934 struct netdev_private *np = netdev_priv(dev);
932 void __iomem *ioaddr = np->base_addr; 935 void __iomem *ioaddr = np->base_addr;
933 936
934 printk(KERN_WARNING "%s: Transmit timed out, status %8.8x," 937 dev_warn(&dev->dev, "Transmit timed out, status %08x, resetting...\n",
935 " resetting...\n", dev->name, ioread32(ioaddr + IntrStatus)); 938 ioread32(ioaddr + IntrStatus));
936 939
937 { 940 {
938 int i; 941 int i;
939 printk(KERN_DEBUG " Rx ring %p: ", np->rx_ring); 942 printk(KERN_DEBUG " Rx ring %p: ", np->rx_ring);
940 for (i = 0; i < RX_RING_SIZE; i++) 943 for (i = 0; i < RX_RING_SIZE; i++)
941 printk(" %8.8x", (unsigned int)np->rx_ring[i].status); 944 printk(KERN_CONT " %08x", (unsigned int)np->rx_ring[i].status);
942 printk(KERN_DEBUG" Tx ring %p: ", np->tx_ring); 945 printk(KERN_CONT "\n");
946 printk(KERN_DEBUG " Tx ring %p: ", np->tx_ring);
943 for (i = 0; i < TX_RING_SIZE; i++) 947 for (i = 0; i < TX_RING_SIZE; i++)
944 printk(" %8.8x", np->tx_ring[i].status); 948 printk(KERN_CONT " %08x", np->tx_ring[i].status);
945 printk("\n"); 949 printk(KERN_CONT "\n");
946 } 950 }
947 printk(KERN_DEBUG "Tx cur %d Tx dirty %d Tx Full %d, q bytes %d.\n", 951 printk(KERN_DEBUG "Tx cur %d Tx dirty %d Tx Full %d, q bytes %d\n",
948 np->cur_tx, np->dirty_tx, np->tx_full, np->tx_q_bytes); 952 np->cur_tx, np->dirty_tx, np->tx_full, np->tx_q_bytes);
949 printk(KERN_DEBUG "Tx Descriptor addr %xh.\n",ioread32(ioaddr+0x4C)); 953 printk(KERN_DEBUG "Tx Descriptor addr %xh\n", ioread32(ioaddr+0x4C));
950 954
951 disable_irq(dev->irq); 955 disable_irq(dev->irq);
952 spin_lock_irq(&np->lock); 956 spin_lock_irq(&np->lock);
@@ -1055,8 +1059,8 @@ static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev)
1055 dev->trans_start = jiffies; 1059 dev->trans_start = jiffies;
1056 1060
1057 if (debug > 4) { 1061 if (debug > 4) {
1058 printk(KERN_DEBUG "%s: Transmit frame #%d queued in slot %d.\n", 1062 printk(KERN_DEBUG "%s: Transmit frame #%d queued in slot %d\n",
1059 dev->name, np->cur_tx, entry); 1063 dev->name, np->cur_tx, entry);
1060 } 1064 }
1061 return NETDEV_TX_OK; 1065 return NETDEV_TX_OK;
1062} 1066}
@@ -1073,8 +1077,8 @@ static void netdev_tx_done(struct net_device *dev)
1073 if (tx_status & 0x8000) { /* There was an error, log it. */ 1077 if (tx_status & 0x8000) { /* There was an error, log it. */
1074#ifndef final_version 1078#ifndef final_version
1075 if (debug > 1) 1079 if (debug > 1)
1076 printk(KERN_DEBUG "%s: Transmit error, Tx status %8.8x.\n", 1080 printk(KERN_DEBUG "%s: Transmit error, Tx status %08x\n",
1077 dev->name, tx_status); 1081 dev->name, tx_status);
1078#endif 1082#endif
1079 np->stats.tx_errors++; 1083 np->stats.tx_errors++;
1080 if (tx_status & 0x0104) np->stats.tx_aborted_errors++; 1084 if (tx_status & 0x0104) np->stats.tx_aborted_errors++;
@@ -1086,8 +1090,8 @@ static void netdev_tx_done(struct net_device *dev)
1086 } else { 1090 } else {
1087#ifndef final_version 1091#ifndef final_version
1088 if (debug > 3) 1092 if (debug > 3)
1089 printk(KERN_DEBUG "%s: Transmit slot %d ok, Tx status %8.8x.\n", 1093 printk(KERN_DEBUG "%s: Transmit slot %d ok, Tx status %08x\n",
1090 dev->name, entry, tx_status); 1094 dev->name, entry, tx_status);
1091#endif 1095#endif
1092 np->stats.tx_bytes += np->tx_skbuff[entry]->len; 1096 np->stats.tx_bytes += np->tx_skbuff[entry]->len;
1093 np->stats.collisions += (tx_status >> 3) & 15; 1097 np->stats.collisions += (tx_status >> 3) & 15;
@@ -1130,8 +1134,8 @@ static irqreturn_t intr_handler(int irq, void *dev_instance)
1130 iowrite32(intr_status & 0x001ffff, ioaddr + IntrStatus); 1134 iowrite32(intr_status & 0x001ffff, ioaddr + IntrStatus);
1131 1135
1132 if (debug > 4) 1136 if (debug > 4)
1133 printk(KERN_DEBUG "%s: Interrupt, status %4.4x.\n", 1137 printk(KERN_DEBUG "%s: Interrupt, status %04x\n",
1134 dev->name, intr_status); 1138 dev->name, intr_status);
1135 1139
1136 if ((intr_status & (NormalIntr|AbnormalIntr)) == 0) 1140 if ((intr_status & (NormalIntr|AbnormalIntr)) == 0)
1137 break; 1141 break;
@@ -1156,8 +1160,9 @@ static irqreturn_t intr_handler(int irq, void *dev_instance)
1156 netdev_error(dev, intr_status); 1160 netdev_error(dev, intr_status);
1157 1161
1158 if (--work_limit < 0) { 1162 if (--work_limit < 0) {
1159 printk(KERN_WARNING "%s: Too much work at interrupt, " 1163 dev_warn(&dev->dev,
1160 "status=0x%4.4x.\n", dev->name, intr_status); 1164 "Too much work at interrupt, status=0x%04x\n",
1165 intr_status);
1161 /* Set the timer to re-enable the other interrupts after 1166 /* Set the timer to re-enable the other interrupts after
1162 10*82usec ticks. */ 1167 10*82usec ticks. */
1163 spin_lock(&np->lock); 1168 spin_lock(&np->lock);
@@ -1171,8 +1176,8 @@ static irqreturn_t intr_handler(int irq, void *dev_instance)
1171 } while (1); 1176 } while (1);
1172 1177
1173 if (debug > 3) 1178 if (debug > 3)
1174 printk(KERN_DEBUG "%s: exiting interrupt, status=%#4.4x.\n", 1179 printk(KERN_DEBUG "%s: exiting interrupt, status=%#4.4x\n",
1175 dev->name, ioread32(ioaddr + IntrStatus)); 1180 dev->name, ioread32(ioaddr + IntrStatus));
1176 return IRQ_RETVAL(handled); 1181 return IRQ_RETVAL(handled);
1177} 1182}
1178 1183
@@ -1185,8 +1190,8 @@ static int netdev_rx(struct net_device *dev)
1185 int work_limit = np->dirty_rx + RX_RING_SIZE - np->cur_rx; 1190 int work_limit = np->dirty_rx + RX_RING_SIZE - np->cur_rx;
1186 1191
1187 if (debug > 4) { 1192 if (debug > 4) {
1188 printk(KERN_DEBUG " In netdev_rx(), entry %d status %4.4x.\n", 1193 printk(KERN_DEBUG " In netdev_rx(), entry %d status %04x\n",
1189 entry, np->rx_ring[entry].status); 1194 entry, np->rx_ring[entry].status);
1190 } 1195 }
1191 1196
1192 /* If EOP is set on the next entry, it's a new packet. Send it up. */ 1197 /* If EOP is set on the next entry, it's a new packet. Send it up. */
@@ -1195,24 +1200,24 @@ static int netdev_rx(struct net_device *dev)
1195 s32 status = desc->status; 1200 s32 status = desc->status;
1196 1201
1197 if (debug > 4) 1202 if (debug > 4)
1198 printk(KERN_DEBUG " netdev_rx() status was %8.8x.\n", 1203 printk(KERN_DEBUG " netdev_rx() status was %08x\n",
1199 status); 1204 status);
1200 if (status < 0) 1205 if (status < 0)
1201 break; 1206 break;
1202 if ((status & 0x38008300) != 0x0300) { 1207 if ((status & 0x38008300) != 0x0300) {
1203 if ((status & 0x38000300) != 0x0300) { 1208 if ((status & 0x38000300) != 0x0300) {
 1204 /* Ignore earlier buffers. */ 1209 /* Ignore earlier buffers. */
1205 if ((status & 0xffff) != 0x7fff) { 1210 if ((status & 0xffff) != 0x7fff) {
1206 printk(KERN_WARNING "%s: Oversized Ethernet frame spanned " 1211 dev_warn(&dev->dev,
1207 "multiple buffers, entry %#x status %4.4x!\n", 1212 "Oversized Ethernet frame spanned multiple buffers, entry %#x status %04x!\n",
1208 dev->name, np->cur_rx, status); 1213 np->cur_rx, status);
1209 np->stats.rx_length_errors++; 1214 np->stats.rx_length_errors++;
1210 } 1215 }
1211 } else if (status & 0x8000) { 1216 } else if (status & 0x8000) {
1212 /* There was a fatal error. */ 1217 /* There was a fatal error. */
1213 if (debug > 2) 1218 if (debug > 2)
1214 printk(KERN_DEBUG "%s: Receive error, Rx status %8.8x.\n", 1219 printk(KERN_DEBUG "%s: Receive error, Rx status %08x\n",
1215 dev->name, status); 1220 dev->name, status);
1216 np->stats.rx_errors++; /* end of a packet.*/ 1221 np->stats.rx_errors++; /* end of a packet.*/
1217 if (status & 0x0890) np->stats.rx_length_errors++; 1222 if (status & 0x0890) np->stats.rx_length_errors++;
1218 if (status & 0x004C) np->stats.rx_frame_errors++; 1223 if (status & 0x004C) np->stats.rx_frame_errors++;
@@ -1225,8 +1230,8 @@ static int netdev_rx(struct net_device *dev)
1225 1230
1226#ifndef final_version 1231#ifndef final_version
1227 if (debug > 4) 1232 if (debug > 4)
1228 printk(KERN_DEBUG " netdev_rx() normal Rx pkt length %d" 1233 printk(KERN_DEBUG " netdev_rx() normal Rx pkt length %d status %x\n",
1229 " status %x.\n", pkt_len, status); 1234 pkt_len, status);
1230#endif 1235#endif
1231 /* Check if the packet is long enough to accept without copying 1236 /* Check if the packet is long enough to accept without copying
1232 to a minimally-sized skbuff. */ 1237 to a minimally-sized skbuff. */
@@ -1251,11 +1256,10 @@ static int netdev_rx(struct net_device *dev)
1251#ifndef final_version /* Remove after testing. */ 1256#ifndef final_version /* Remove after testing. */
1252 /* You will want this info for the initial debug. */ 1257 /* You will want this info for the initial debug. */
1253 if (debug > 5) 1258 if (debug > 5)
1254 printk(KERN_DEBUG " Rx data %pM %pM" 1259 printk(KERN_DEBUG " Rx data %pM %pM %02x%02x %pI4\n",
1255 " %2.2x%2.2x %d.%d.%d.%d.\n",
1256 &skb->data[0], &skb->data[6], 1260 &skb->data[0], &skb->data[6],
1257 skb->data[12], skb->data[13], 1261 skb->data[12], skb->data[13],
1258 skb->data[14], skb->data[15], skb->data[16], skb->data[17]); 1262 &skb->data[14]);
1259#endif 1263#endif
1260 skb->protocol = eth_type_trans(skb, dev); 1264 skb->protocol = eth_type_trans(skb, dev);
1261 netif_rx(skb); 1265 netif_rx(skb);
@@ -1293,8 +1297,8 @@ static void netdev_error(struct net_device *dev, int intr_status)
1293 void __iomem *ioaddr = np->base_addr; 1297 void __iomem *ioaddr = np->base_addr;
1294 1298
1295 if (debug > 2) 1299 if (debug > 2)
1296 printk(KERN_DEBUG "%s: Abnormal event, %8.8x.\n", 1300 printk(KERN_DEBUG "%s: Abnormal event, %08x\n",
1297 dev->name, intr_status); 1301 dev->name, intr_status);
1298 if (intr_status == 0xffffffff) 1302 if (intr_status == 0xffffffff)
1299 return; 1303 return;
1300 spin_lock(&np->lock); 1304 spin_lock(&np->lock);
@@ -1314,8 +1318,8 @@ static void netdev_error(struct net_device *dev, int intr_status)
1314 new = 127; /* load full packet before starting */ 1318 new = 127; /* load full packet before starting */
1315 new = (np->csr6 & ~(0x7F << 14)) | (new<<14); 1319 new = (np->csr6 & ~(0x7F << 14)) | (new<<14);
1316#endif 1320#endif
1317 printk(KERN_DEBUG "%s: Tx underflow, new csr6 %8.8x.\n", 1321 printk(KERN_DEBUG "%s: Tx underflow, new csr6 %08x\n",
1318 dev->name, new); 1322 dev->name, new);
1319 update_csr6(dev, new); 1323 update_csr6(dev, new);
1320 } 1324 }
1321 if (intr_status & RxDied) { /* Missed a Rx frame. */ 1325 if (intr_status & RxDied) { /* Missed a Rx frame. */
@@ -1487,11 +1491,13 @@ static int netdev_close(struct net_device *dev)
1487 netif_stop_queue(dev); 1491 netif_stop_queue(dev);
1488 1492
1489 if (debug > 1) { 1493 if (debug > 1) {
1490 printk(KERN_DEBUG "%s: Shutting down ethercard, status was %8.8x " 1494 printk(KERN_DEBUG "%s: Shutting down ethercard, status was %08x Config %08x\n",
1491 "Config %8.8x.\n", dev->name, ioread32(ioaddr + IntrStatus), 1495 dev->name, ioread32(ioaddr + IntrStatus),
1492 ioread32(ioaddr + NetworkConfig)); 1496 ioread32(ioaddr + NetworkConfig));
1493 printk(KERN_DEBUG "%s: Queue pointers were Tx %d / %d, Rx %d / %d.\n", 1497 printk(KERN_DEBUG "%s: Queue pointers were Tx %d / %d, Rx %d / %d\n",
1494 dev->name, np->cur_tx, np->dirty_tx, np->cur_rx, np->dirty_rx); 1498 dev->name,
1499 np->cur_tx, np->dirty_tx,
1500 np->cur_rx, np->dirty_rx);
1495 } 1501 }
1496 1502
1497 /* Stop the chip's Tx and Rx processes. */ 1503 /* Stop the chip's Tx and Rx processes. */
@@ -1512,18 +1518,16 @@ static int netdev_close(struct net_device *dev)
1512 if (debug > 2) { 1518 if (debug > 2) {
1513 int i; 1519 int i;
1514 1520
1515 printk(KERN_DEBUG" Tx ring at %8.8x:\n", 1521 printk(KERN_DEBUG" Tx ring at %08x:\n", (int)np->tx_ring);
1516 (int)np->tx_ring);
1517 for (i = 0; i < TX_RING_SIZE; i++) 1522 for (i = 0; i < TX_RING_SIZE; i++)
1518 printk(KERN_DEBUG " #%d desc. %4.4x %4.4x %8.8x.\n", 1523 printk(KERN_DEBUG " #%d desc. %04x %04x %08x\n",
1519 i, np->tx_ring[i].length, 1524 i, np->tx_ring[i].length,
1520 np->tx_ring[i].status, np->tx_ring[i].buffer1); 1525 np->tx_ring[i].status, np->tx_ring[i].buffer1);
1521 printk(KERN_DEBUG " Rx ring %8.8x:\n", 1526 printk(KERN_DEBUG " Rx ring %08x:\n", (int)np->rx_ring);
1522 (int)np->rx_ring);
1523 for (i = 0; i < RX_RING_SIZE; i++) { 1527 for (i = 0; i < RX_RING_SIZE; i++) {
1524 printk(KERN_DEBUG " #%d desc. %4.4x %4.4x %8.8x\n", 1528 printk(KERN_DEBUG " #%d desc. %04x %04x %08x\n",
1525 i, np->rx_ring[i].length, 1529 i, np->rx_ring[i].length,
1526 np->rx_ring[i].status, np->rx_ring[i].buffer1); 1530 np->rx_ring[i].status, np->rx_ring[i].buffer1);
1527 } 1531 }
1528 } 1532 }
1529#endif /* __i386__ debugging only */ 1533#endif /* __i386__ debugging only */
@@ -1622,9 +1626,8 @@ static int w840_resume (struct pci_dev *pdev)
1622 goto out; /* device not suspended */ 1626 goto out; /* device not suspended */
1623 if (netif_running(dev)) { 1627 if (netif_running(dev)) {
1624 if ((retval = pci_enable_device(pdev))) { 1628 if ((retval = pci_enable_device(pdev))) {
1625 printk (KERN_ERR 1629 dev_err(&dev->dev,
1626 "%s: pci_enable_device failed in resume\n", 1630 "pci_enable_device failed in resume\n");
1627 dev->name);
1628 goto out; 1631 goto out;
1629 } 1632 }
1630 spin_lock_irq(&np->lock); 1633 spin_lock_irq(&np->lock);
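
Two smaller printk conventions show up in the winbond-840 hunks above. The ring dumps in tx_timeout() now mark every fragment after the first with KERN_CONT (or its pr_cont() wrapper) so the logging code appends to the current line instead of opening a new record, and each line is terminated explicitly. The Rx debug dump also switches to the %pM and %pI4 extensions, which format a 6-byte MAC address and a big-endian IPv4 address directly from a pointer. A short sketch of the continuation idiom, with a made-up dump helper:

    #include <linux/kernel.h>

    static void example_dump_ring(const u32 *ring, int n)  /* hypothetical */
    {
            int i;

            printk(KERN_DEBUG " ring:");
            for (i = 0; i < n; i++)
                    pr_cont(" %08x", ring[i]);      /* append to same line */
            pr_cont("\n");                          /* close the record */
    }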
diff --git a/drivers/net/tulip/xircom_cb.c b/drivers/net/tulip/xircom_cb.c
index 9924c4c7e2d..acfeeb98056 100644
--- a/drivers/net/tulip/xircom_cb.c
+++ b/drivers/net/tulip/xircom_cb.c
@@ -14,6 +14,8 @@
14 * $Id: xircom_cb.c,v 1.33 2001/03/19 14:02:07 arjanv Exp $ 14 * $Id: xircom_cb.c,v 1.33 2001/03/19 14:02:07 arjanv Exp $
15 */ 15 */
16 16
17#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
18
17#include <linux/module.h> 19#include <linux/module.h>
18#include <linux/kernel.h> 20#include <linux/kernel.h>
19#include <linux/string.h> 21#include <linux/string.h>
@@ -144,7 +146,7 @@ static int link_status(struct xircom_private *card);
144 146
145 147
146 148
147static struct pci_device_id xircom_pci_table[] = { 149static DEFINE_PCI_DEVICE_TABLE(xircom_pci_table) = {
148 {0x115D, 0x0003, PCI_ANY_ID, PCI_ANY_ID,}, 150 {0x115D, 0x0003, PCI_ANY_ID, PCI_ANY_ID,},
149 {0,}, 151 {0,},
150}; 152};
@@ -234,7 +236,7 @@ static int __devinit xircom_probe(struct pci_dev *pdev, const struct pci_device_
234 pci_write_config_word (pdev, PCI_STATUS,tmp16); 236 pci_write_config_word (pdev, PCI_STATUS,tmp16);
235 237
236 if (!request_region(pci_resource_start(pdev, 0), 128, "xircom_cb")) { 238 if (!request_region(pci_resource_start(pdev, 0), 128, "xircom_cb")) {
237 printk(KERN_ERR "xircom_probe: failed to allocate io-region\n"); 239 pr_err("%s: failed to allocate io-region\n", __func__);
238 return -ENODEV; 240 return -ENODEV;
239 } 241 }
240 242
@@ -245,7 +247,7 @@ static int __devinit xircom_probe(struct pci_dev *pdev, const struct pci_device_
245 */ 247 */
246 dev = alloc_etherdev(sizeof(struct xircom_private)); 248 dev = alloc_etherdev(sizeof(struct xircom_private));
247 if (!dev) { 249 if (!dev) {
248 printk(KERN_ERR "xircom_probe: failed to allocate etherdev\n"); 250 pr_err("%s: failed to allocate etherdev\n", __func__);
249 goto device_fail; 251 goto device_fail;
250 } 252 }
251 private = netdev_priv(dev); 253 private = netdev_priv(dev);
@@ -253,12 +255,12 @@ static int __devinit xircom_probe(struct pci_dev *pdev, const struct pci_device_
253 /* Allocate the send/receive buffers */ 255 /* Allocate the send/receive buffers */
254 private->rx_buffer = pci_alloc_consistent(pdev,8192,&private->rx_dma_handle); 256 private->rx_buffer = pci_alloc_consistent(pdev,8192,&private->rx_dma_handle);
255 if (private->rx_buffer == NULL) { 257 if (private->rx_buffer == NULL) {
256 printk(KERN_ERR "xircom_probe: no memory for rx buffer \n"); 258 pr_err("%s: no memory for rx buffer\n", __func__);
257 goto rx_buf_fail; 259 goto rx_buf_fail;
258 } 260 }
259 private->tx_buffer = pci_alloc_consistent(pdev,8192,&private->tx_dma_handle); 261 private->tx_buffer = pci_alloc_consistent(pdev,8192,&private->tx_dma_handle);
260 if (private->tx_buffer == NULL) { 262 if (private->tx_buffer == NULL) {
261 printk(KERN_ERR "xircom_probe: no memory for tx buffer \n"); 263 pr_err("%s: no memory for tx buffer\n", __func__);
262 goto tx_buf_fail; 264 goto tx_buf_fail;
263 } 265 }
264 266
@@ -281,11 +283,12 @@ static int __devinit xircom_probe(struct pci_dev *pdev, const struct pci_device_
281 pci_set_drvdata(pdev, dev); 283 pci_set_drvdata(pdev, dev);
282 284
283 if (register_netdev(dev)) { 285 if (register_netdev(dev)) {
284 printk(KERN_ERR "xircom_probe: netdevice registration failed.\n"); 286 pr_err("%s: netdevice registration failed\n", __func__);
285 goto reg_fail; 287 goto reg_fail;
286 } 288 }
287 289
288 printk(KERN_INFO "%s: Xircom cardbus revision %i at irq %i \n", dev->name, pdev->revision, pdev->irq); 290 dev_info(&dev->dev, "Xircom cardbus revision %i at irq %i\n",
291 pdev->revision, pdev->irq);
289 /* start the transmitter to get a heartbeat */ 292 /* start the transmitter to get a heartbeat */
290 /* TODO: send 2 dummy packets here */ 293 /* TODO: send 2 dummy packets here */
291 transceiver_voodoo(private); 294 transceiver_voodoo(private);
@@ -347,8 +350,10 @@ static irqreturn_t xircom_interrupt(int irq, void *dev_instance)
347 350
348#ifdef DEBUG 351#ifdef DEBUG
349 print_binary(status); 352 print_binary(status);
350 printk("tx status 0x%08x 0x%08x \n",card->tx_buffer[0],card->tx_buffer[4]); 353 printk("tx status 0x%08x 0x%08x \n",
351 printk("rx status 0x%08x 0x%08x \n",card->rx_buffer[0],card->rx_buffer[4]); 354 card->tx_buffer[0], card->tx_buffer[4]);
355 printk("rx status 0x%08x 0x%08x \n",
356 card->rx_buffer[0], card->rx_buffer[4]);
352#endif 357#endif
353 /* Handle shared irq and hotplug */ 358 /* Handle shared irq and hotplug */
354 if (status == 0 || status == 0xffffffff) { 359 if (status == 0 || status == 0xffffffff) {
@@ -358,9 +363,9 @@ static irqreturn_t xircom_interrupt(int irq, void *dev_instance)
358 363
359 if (link_status_changed(card)) { 364 if (link_status_changed(card)) {
360 int newlink; 365 int newlink;
361 printk(KERN_DEBUG "xircom_cb: Link status has changed \n"); 366 printk(KERN_DEBUG "xircom_cb: Link status has changed\n");
362 newlink = link_status(card); 367 newlink = link_status(card);
363 printk(KERN_INFO "xircom_cb: Link is %i mbit \n",newlink); 368 dev_info(&dev->dev, "Link is %i mbit\n", newlink);
364 if (newlink) 369 if (newlink)
365 netif_carrier_on(dev); 370 netif_carrier_on(dev);
366 else 371 else
@@ -457,7 +462,8 @@ static int xircom_open(struct net_device *dev)
457 struct xircom_private *xp = netdev_priv(dev); 462 struct xircom_private *xp = netdev_priv(dev);
458 int retval; 463 int retval;
459 enter("xircom_open"); 464 enter("xircom_open");
460 printk(KERN_INFO "xircom cardbus adaptor found, registering as %s, using irq %i \n",dev->name,dev->irq); 465 pr_info("xircom cardbus adaptor found, registering as %s, using irq %i \n",
466 dev->name, dev->irq);
461 retval = request_irq(dev->irq, xircom_interrupt, IRQF_SHARED, dev->name, dev); 467 retval = request_irq(dev->irq, xircom_interrupt, IRQF_SHARED, dev->name, dev);
462 if (retval) { 468 if (retval) {
463 leave("xircom_open - No IRQ"); 469 leave("xircom_open - No IRQ");
@@ -770,7 +776,7 @@ static void activate_receiver(struct xircom_private *card)
770 udelay(50); 776 udelay(50);
771 counter--; 777 counter--;
772 if (counter <= 0) 778 if (counter <= 0)
773 printk(KERN_ERR "xircom_cb: Receiver failed to deactivate\n"); 779 pr_err("Receiver failed to deactivate\n");
774 } 780 }
775 781
776 /* enable the receiver */ 782 /* enable the receiver */
@@ -787,7 +793,7 @@ static void activate_receiver(struct xircom_private *card)
787 udelay(50); 793 udelay(50);
788 counter--; 794 counter--;
789 if (counter <= 0) 795 if (counter <= 0)
790 printk(KERN_ERR "xircom_cb: Receiver failed to re-activate\n"); 796 pr_err("Receiver failed to re-activate\n");
791 } 797 }
792 798
793 leave("activate_receiver"); 799 leave("activate_receiver");
@@ -818,7 +824,7 @@ static void deactivate_receiver(struct xircom_private *card)
818 udelay(50); 824 udelay(50);
819 counter--; 825 counter--;
820 if (counter <= 0) 826 if (counter <= 0)
821 printk(KERN_ERR "xircom_cb: Receiver failed to deactivate\n"); 827 pr_err("Receiver failed to deactivate\n");
822 } 828 }
823 829
824 830
@@ -861,7 +867,7 @@ static void activate_transmitter(struct xircom_private *card)
861 udelay(50); 867 udelay(50);
862 counter--; 868 counter--;
863 if (counter <= 0) 869 if (counter <= 0)
864 printk(KERN_ERR "xircom_cb: Transmitter failed to deactivate\n"); 870 pr_err("Transmitter failed to deactivate\n");
865 } 871 }
866 872
867 /* enable the transmitter */ 873 /* enable the transmitter */
@@ -878,7 +884,7 @@ static void activate_transmitter(struct xircom_private *card)
878 udelay(50); 884 udelay(50);
879 counter--; 885 counter--;
880 if (counter <= 0) 886 if (counter <= 0)
881 printk(KERN_ERR "xircom_cb: Transmitter failed to re-activate\n"); 887 pr_err("Transmitter failed to re-activate\n");
882 } 888 }
883 889
884 leave("activate_transmitter"); 890 leave("activate_transmitter");
@@ -909,7 +915,7 @@ static void deactivate_transmitter(struct xircom_private *card)
909 udelay(50); 915 udelay(50);
910 counter--; 916 counter--;
911 if (counter <= 0) 917 if (counter <= 0)
912 printk(KERN_ERR "xircom_cb: Transmitter failed to deactivate\n"); 918 pr_err("Transmitter failed to deactivate\n");
913 } 919 }
914 920
915 921
@@ -1184,7 +1190,7 @@ static void investigate_read_descriptor(struct net_device *dev,struct xircom_pri
1184 struct sk_buff *skb; 1190 struct sk_buff *skb;
1185 1191
1186 if (pkt_len > 1518) { 1192 if (pkt_len > 1518) {
1187 printk(KERN_ERR "xircom_cb: Packet length %i is bogus \n",pkt_len); 1193 pr_err("Packet length %i is bogus\n", pkt_len);
1188 pkt_len = 1518; 1194 pkt_len = 1518;
1189 } 1195 }
1190 1196
@@ -1222,7 +1228,7 @@ static void investigate_write_descriptor(struct net_device *dev, struct xircom_p
1222 status = le32_to_cpu(card->tx_buffer[4*descnr]); 1228 status = le32_to_cpu(card->tx_buffer[4*descnr]);
1223#if 0 1229#if 0
1224 if (status & 0x8000) { /* Major error */ 1230 if (status & 0x8000) { /* Major error */
1225 printk(KERN_ERR "Major transmit error status %x \n", status); 1231 pr_err("Major transmit error status %x\n", status);
1226 card->tx_buffer[4*descnr] = 0; 1232 card->tx_buffer[4*descnr] = 0;
1227 netif_wake_queue (dev); 1233 netif_wake_queue (dev);
1228 } 1234 }
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 2834a01bae2..5adb3d15055 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -144,6 +144,7 @@ static int tun_attach(struct tun_struct *tun, struct file *file)
144 err = 0; 144 err = 0;
145 tfile->tun = tun; 145 tfile->tun = tun;
146 tun->tfile = tfile; 146 tun->tfile = tfile;
147 tun->socket.file = file;
147 dev_hold(tun->dev); 148 dev_hold(tun->dev);
148 sock_hold(tun->socket.sk); 149 sock_hold(tun->socket.sk);
149 atomic_inc(&tfile->count); 150 atomic_inc(&tfile->count);
@@ -158,6 +159,7 @@ static void __tun_detach(struct tun_struct *tun)
158 /* Detach from net device */ 159 /* Detach from net device */
159 netif_tx_lock_bh(tun->dev); 160 netif_tx_lock_bh(tun->dev);
160 tun->tfile = NULL; 161 tun->tfile = NULL;
162 tun->socket.file = NULL;
161 netif_tx_unlock_bh(tun->dev); 163 netif_tx_unlock_bh(tun->dev);
162 164
163 /* Drop read queue */ 165 /* Drop read queue */
@@ -387,7 +389,8 @@ static netdev_tx_t tun_net_xmit(struct sk_buff *skb, struct net_device *dev)
387 /* Notify and wake up reader process */ 389 /* Notify and wake up reader process */
388 if (tun->flags & TUN_FASYNC) 390 if (tun->flags & TUN_FASYNC)
389 kill_fasync(&tun->fasync, SIGIO, POLL_IN); 391 kill_fasync(&tun->fasync, SIGIO, POLL_IN);
390 wake_up_interruptible(&tun->socket.wait); 392 wake_up_interruptible_poll(&tun->socket.wait, POLLIN |
393 POLLRDNORM | POLLRDBAND);
391 return NETDEV_TX_OK; 394 return NETDEV_TX_OK;
392 395
393drop: 396drop:
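The xmit path above now uses wake_up_interruptible_poll() so that only waiters polling for readable events are woken, rather than every sleeper on the queue. A sketch of the general shape, with a hypothetical queue structure (the example_ names are not from the patch):

#include <linux/wait.h>
#include <linux/poll.h>

struct example_queue {
        wait_queue_head_t wait;         /* init with init_waitqueue_head() */
};

static void example_notify_readable(struct example_queue *q)
{
        /* wake only sleepers interested in "readable" poll events */
        wake_up_interruptible_poll(&q->wait,
                                   POLLIN | POLLRDNORM | POLLRDBAND);
}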
@@ -743,7 +746,7 @@ static __inline__ ssize_t tun_put_user(struct tun_struct *tun,
743 len = min_t(int, skb->len, len); 746 len = min_t(int, skb->len, len);
744 747
745 skb_copy_datagram_const_iovec(skb, 0, iv, total, len); 748 skb_copy_datagram_const_iovec(skb, 0, iv, total, len);
746 total += len; 749 total += skb->len;
747 750
748 tun->dev->stats.tx_packets++; 751 tun->dev->stats.tx_packets++;
749 tun->dev->stats.tx_bytes += len; 752 tun->dev->stats.tx_bytes += len;
@@ -751,34 +754,23 @@ static __inline__ ssize_t tun_put_user(struct tun_struct *tun,
751 return total; 754 return total;
752} 755}
753 756
754static ssize_t tun_chr_aio_read(struct kiocb *iocb, const struct iovec *iv, 757static ssize_t tun_do_read(struct tun_struct *tun,
755 unsigned long count, loff_t pos) 758 struct kiocb *iocb, const struct iovec *iv,
759 ssize_t len, int noblock)
756{ 760{
757 struct file *file = iocb->ki_filp;
758 struct tun_file *tfile = file->private_data;
759 struct tun_struct *tun = __tun_get(tfile);
760 DECLARE_WAITQUEUE(wait, current); 761 DECLARE_WAITQUEUE(wait, current);
761 struct sk_buff *skb; 762 struct sk_buff *skb;
762 ssize_t len, ret = 0; 763 ssize_t ret = 0;
763
764 if (!tun)
765 return -EBADFD;
766 764
767 DBG(KERN_INFO "%s: tun_chr_read\n", tun->dev->name); 765 DBG(KERN_INFO "%s: tun_chr_read\n", tun->dev->name);
768 766
769 len = iov_length(iv, count);
770 if (len < 0) {
771 ret = -EINVAL;
772 goto out;
773 }
774
775 add_wait_queue(&tun->socket.wait, &wait); 767 add_wait_queue(&tun->socket.wait, &wait);
776 while (len) { 768 while (len) {
777 current->state = TASK_INTERRUPTIBLE; 769 current->state = TASK_INTERRUPTIBLE;
778 770
779 /* Read frames from the queue */ 771 /* Read frames from the queue */
780 if (!(skb=skb_dequeue(&tun->socket.sk->sk_receive_queue))) { 772 if (!(skb=skb_dequeue(&tun->socket.sk->sk_receive_queue))) {
781 if (file->f_flags & O_NONBLOCK) { 773 if (noblock) {
782 ret = -EAGAIN; 774 ret = -EAGAIN;
783 break; 775 break;
784 } 776 }
@@ -805,6 +797,27 @@ static ssize_t tun_chr_aio_read(struct kiocb *iocb, const struct iovec *iv,
805 current->state = TASK_RUNNING; 797 current->state = TASK_RUNNING;
806 remove_wait_queue(&tun->socket.wait, &wait); 798 remove_wait_queue(&tun->socket.wait, &wait);
807 799
800 return ret;
801}
802
803static ssize_t tun_chr_aio_read(struct kiocb *iocb, const struct iovec *iv,
804 unsigned long count, loff_t pos)
805{
806 struct file *file = iocb->ki_filp;
807 struct tun_file *tfile = file->private_data;
808 struct tun_struct *tun = __tun_get(tfile);
809 ssize_t len, ret;
810
811 if (!tun)
812 return -EBADFD;
813 len = iov_length(iv, count);
814 if (len < 0) {
815 ret = -EINVAL;
816 goto out;
817 }
818
819 ret = tun_do_read(tun, iocb, iv, len, file->f_flags & O_NONBLOCK);
820 ret = min_t(ssize_t, ret, len);
808out: 821out:
809 tun_put(tun); 822 tun_put(tun);
810 return ret; 823 return ret;
@@ -847,7 +860,8 @@ static void tun_sock_write_space(struct sock *sk)
847 return; 860 return;
848 861
849 if (sk->sk_sleep && waitqueue_active(sk->sk_sleep)) 862 if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
850 wake_up_interruptible_sync(sk->sk_sleep); 863 wake_up_interruptible_sync_poll(sk->sk_sleep, POLLOUT |
864 POLLWRNORM | POLLWRBAND);
851 865
852 tun = tun_sk(sk)->tun; 866 tun = tun_sk(sk)->tun;
853 kill_fasync(&tun->fasync, SIGIO, POLL_OUT); 867 kill_fasync(&tun->fasync, SIGIO, POLL_OUT);
@@ -858,6 +872,37 @@ static void tun_sock_destruct(struct sock *sk)
858 free_netdev(tun_sk(sk)->tun->dev); 872 free_netdev(tun_sk(sk)->tun->dev);
859} 873}
860 874
875static int tun_sendmsg(struct kiocb *iocb, struct socket *sock,
876 struct msghdr *m, size_t total_len)
877{
878 struct tun_struct *tun = container_of(sock, struct tun_struct, socket);
879 return tun_get_user(tun, m->msg_iov, total_len,
880 m->msg_flags & MSG_DONTWAIT);
881}
882
883static int tun_recvmsg(struct kiocb *iocb, struct socket *sock,
884 struct msghdr *m, size_t total_len,
885 int flags)
886{
887 struct tun_struct *tun = container_of(sock, struct tun_struct, socket);
888 int ret;
889 if (flags & ~(MSG_DONTWAIT|MSG_TRUNC))
890 return -EINVAL;
891 ret = tun_do_read(tun, iocb, m->msg_iov, total_len,
892 flags & MSG_DONTWAIT);
893 if (ret > total_len) {
894 m->msg_flags |= MSG_TRUNC;
895 ret = flags & MSG_TRUNC ? ret : total_len;
896 }
897 return ret;
898}
899
900/* Ops structure to mimic raw sockets with tun */
901static const struct proto_ops tun_socket_ops = {
902 .sendmsg = tun_sendmsg,
903 .recvmsg = tun_recvmsg,
904};
905
861static struct proto tun_proto = { 906static struct proto tun_proto = {
862 .name = "tun", 907 .name = "tun",
863 .owner = THIS_MODULE, 908 .owner = THIS_MODULE,
@@ -986,6 +1031,7 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
986 goto err_free_dev; 1031 goto err_free_dev;
987 1032
988 init_waitqueue_head(&tun->socket.wait); 1033 init_waitqueue_head(&tun->socket.wait);
1034 tun->socket.ops = &tun_socket_ops;
989 sock_init_data(&tun->socket, sk); 1035 sock_init_data(&tun->socket, sk);
990 sk->sk_write_space = tun_sock_write_space; 1036 sk->sk_write_space = tun_sock_write_space;
991 sk->sk_sndbuf = INT_MAX; 1037 sk->sk_sndbuf = INT_MAX;
@@ -1525,6 +1571,23 @@ static void tun_cleanup(void)
1525 rtnl_link_unregister(&tun_link_ops); 1571 rtnl_link_unregister(&tun_link_ops);
1526} 1572}
1527 1573
1574/* Get an underlying socket object from tun file. Returns error unless file is
1575 * attached to a device. The returned object works like a packet socket, it
1576 * can be used for sock_sendmsg/sock_recvmsg. The caller is responsible for
1577 * holding a reference to the file for as long as the socket is in use. */
1578struct socket *tun_get_socket(struct file *file)
1579{
1580 struct tun_struct *tun;
1581 if (file->f_op != &tun_fops)
1582 return ERR_PTR(-EINVAL);
1583 tun = tun_get(file);
1584 if (!tun)
1585 return ERR_PTR(-EBADFD);
1586 tun_put(tun);
1587 return &tun->socket;
1588}
1589EXPORT_SYMBOL_GPL(tun_get_socket);
1590
1528module_init(tun_init); 1591module_init(tun_init);
1529module_exit(tun_cleanup); 1592module_exit(tun_cleanup);
1530MODULE_DESCRIPTION(DRV_DESCRIPTION); 1593MODULE_DESCRIPTION(DRV_DESCRIPTION);
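tun_get_socket(), exported at the end of the tun.c diff above, hands an in-kernel user (vhost-net is the intended consumer) the socket behind a tun file descriptor; per the comment, the caller must hold the file reference while it uses the socket. A minimal error-handling sketch of such a caller, assuming the prototype is exported via linux/if_tun.h as elsewhere in this series; the hypothetical example_get_tun_sock() helper and the actual sock_sendmsg()/sock_recvmsg() traffic are omitted details, not part of the patch:

#include <linux/file.h>
#include <linux/err.h>
#include <linux/net.h>
#include <linux/if_tun.h>

static struct socket *example_get_tun_sock(int fd)
{
        struct file *file = fget(fd);
        struct socket *sock;

        if (!file)
                return ERR_PTR(-EBADF);

        sock = tun_get_socket(file);
        if (IS_ERR(sock))
                fput(file);     /* not a tun file, or no device attached */

        /* on success the caller keeps the file reference
         * for as long as it uses the socket */
        return sock;
}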
diff --git a/drivers/net/typhoon.c b/drivers/net/typhoon.c
index 39f1fc650be..6e4f754c4ba 100644
--- a/drivers/net/typhoon.c
+++ b/drivers/net/typhoon.c
@@ -215,7 +215,7 @@ static struct typhoon_card_info typhoon_card_info[] __devinitdata = {
215 * bit 8 indicates if this is a (0) copper or (1) fiber card 215 * bit 8 indicates if this is a (0) copper or (1) fiber card
216 * bits 12-16 indicate card type: (0) client and (1) server 216 * bits 12-16 indicate card type: (0) client and (1) server
217 */ 217 */
218static struct pci_device_id typhoon_pci_tbl[] = { 218static DEFINE_PCI_DEVICE_TABLE(typhoon_pci_tbl) = {
219 { PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990, 219 { PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990,
220 PCI_ANY_ID, PCI_ANY_ID, 0, 0,TYPHOON_TX }, 220 PCI_ANY_ID, PCI_ANY_ID, 0, 0,TYPHOON_TX },
221 { PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990_TX_95, 221 { PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990_TX_95,
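Several drivers in this section (typhoon above, and later via-rhine, via-velocity, vmxnet3, xircom_cb) switch their ID tables to DEFINE_PCI_DEVICE_TABLE, which at this point expands to roughly a const struct pci_device_id array placed in the __devinitconst section. A minimal sketch with placeholder IDs (the example_ name and the 0x1234/0x5678 IDs are made up):

#include <linux/pci.h>
#include <linux/module.h>

static DEFINE_PCI_DEVICE_TABLE(example_pci_tbl) = {
        { PCI_DEVICE(0x1234, 0x5678) }, /* placeholder vendor/device */
        { 0, }
};
MODULE_DEVICE_TABLE(pci, example_pci_tbl);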
diff --git a/drivers/net/ucc_geth.c b/drivers/net/ucc_geth.c
index eb8fe7e16c6..225f65812f2 100644
--- a/drivers/net/ucc_geth.c
+++ b/drivers/net/ucc_geth.c
@@ -37,6 +37,7 @@
37#include <asm/qe.h> 37#include <asm/qe.h>
38#include <asm/ucc.h> 38#include <asm/ucc.h>
39#include <asm/ucc_fast.h> 39#include <asm/ucc_fast.h>
40#include <asm/machdep.h>
40 41
41#include "ucc_geth.h" 42#include "ucc_geth.h"
42#include "fsl_pq_mdio.h" 43#include "fsl_pq_mdio.h"
@@ -1334,7 +1335,7 @@ static int adjust_enet_interface(struct ucc_geth_private *ugeth)
1334 struct ucc_geth __iomem *ug_regs; 1335 struct ucc_geth __iomem *ug_regs;
1335 struct ucc_fast __iomem *uf_regs; 1336 struct ucc_fast __iomem *uf_regs;
1336 int ret_val; 1337 int ret_val;
1337 u32 upsmr, maccfg2, tbiBaseAddress; 1338 u32 upsmr, maccfg2;
1338 u16 value; 1339 u16 value;
1339 1340
1340 ugeth_vdbg("%s: IN", __func__); 1341 ugeth_vdbg("%s: IN", __func__);
@@ -1389,14 +1390,20 @@ static int adjust_enet_interface(struct ucc_geth_private *ugeth)
1389 /* Note that this depends on proper setting in utbipar register. */ 1390 /* Note that this depends on proper setting in utbipar register. */
1390 if ((ugeth->phy_interface == PHY_INTERFACE_MODE_TBI) || 1391 if ((ugeth->phy_interface == PHY_INTERFACE_MODE_TBI) ||
1391 (ugeth->phy_interface == PHY_INTERFACE_MODE_RTBI)) { 1392 (ugeth->phy_interface == PHY_INTERFACE_MODE_RTBI)) {
1392 tbiBaseAddress = in_be32(&ug_regs->utbipar); 1393 struct ucc_geth_info *ug_info = ugeth->ug_info;
1393 tbiBaseAddress &= UTBIPAR_PHY_ADDRESS_MASK; 1394 struct phy_device *tbiphy;
1394 tbiBaseAddress >>= UTBIPAR_PHY_ADDRESS_SHIFT; 1395
1395 value = ugeth->phydev->bus->read(ugeth->phydev->bus, 1396 if (!ug_info->tbi_node)
1396 (u8) tbiBaseAddress, ENET_TBI_MII_CR); 1397 ugeth_warn("TBI mode requires that the device "
1398 "tree specify a tbi-handle\n");
1399
1400 tbiphy = of_phy_find_device(ug_info->tbi_node);
1401 if (!tbiphy)
1402 ugeth_warn("Could not get TBI device\n");
1403
1404 value = phy_read(tbiphy, ENET_TBI_MII_CR);
1397 value &= ~0x1000; /* Turn off autonegotiation */ 1405 value &= ~0x1000; /* Turn off autonegotiation */
1398 ugeth->phydev->bus->write(ugeth->phydev->bus, 1406 phy_write(tbiphy, ENET_TBI_MII_CR, value);
1399 (u8) tbiBaseAddress, ENET_TBI_MII_CR, value);
1400 } 1407 }
1401 1408
1402 init_check_frame_length_mode(ug_info->lengthCheckRx, &ug_regs->maccfg2); 1409 init_check_frame_length_mode(ug_info->lengthCheckRx, &ug_regs->maccfg2);
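The ucc_geth hunk above stops deriving the TBI PHY address from utbipar and instead resolves the PHY from the tbi-handle device-tree node, then uses the phylib accessors. A minimal sketch of that lookup-and-poke pattern, using the generic MII_BMCR/BMCR_ANENABLE names rather than the driver's ENET_TBI_MII_CR; the function name is hypothetical:

#include <linux/of_mdio.h>
#include <linux/of.h>
#include <linux/phy.h>
#include <linux/mii.h>

static void example_disable_tbi_aneg(struct device_node *tbi_node)
{
        struct phy_device *tbiphy;
        int value;

        if (!tbi_node)
                return;         /* device tree did not provide a tbi-handle */

        tbiphy = of_phy_find_device(tbi_node);
        if (!tbiphy)
                return;

        value = phy_read(tbiphy, MII_BMCR);
        if (value < 0)
                return;

        /* turn off autonegotiation on the TBI interface */
        phy_write(tbiphy, MII_BMCR, value & ~BMCR_ANENABLE);
}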
diff --git a/drivers/net/usb/catc.c b/drivers/net/usb/catc.c
index 22b87e64a81..7d3fa06980c 100644
--- a/drivers/net/usb/catc.c
+++ b/drivers/net/usb/catc.c
@@ -897,11 +897,9 @@ static int catc_probe(struct usb_interface *intf, const struct usb_device_id *id
897 f5u011_rxmode(catc, catc->rxmode); 897 f5u011_rxmode(catc, catc->rxmode);
898 } 898 }
899 dbg("Init done."); 899 dbg("Init done.");
900 printk(KERN_INFO "%s: %s USB Ethernet at usb-%s-%s, ", 900 printk(KERN_INFO "%s: %s USB Ethernet at usb-%s-%s, %pM.\n",
901 netdev->name, (catc->is_f5u011) ? "Belkin F5U011" : "CATC EL1210A NetMate", 901 netdev->name, (catc->is_f5u011) ? "Belkin F5U011" : "CATC EL1210A NetMate",
902 usbdev->bus->bus_name, usbdev->devpath); 902 usbdev->bus->bus_name, usbdev->devpath, netdev->dev_addr);
903 for (i = 0; i < 5; i++) printk("%2.2x:", netdev->dev_addr[i]);
904 printk("%2.2x.\n", netdev->dev_addr[i]);
905 usb_set_intfdata(intf, catc); 903 usb_set_intfdata(intf, catc);
906 904
907 SET_NETDEV_DEV(netdev, &intf->dev); 905 SET_NETDEV_DEV(netdev, &intf->dev);
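The catc change above (like the rtl8150 and via-velocity ones later in this section) relies on the %pM printk extension, which formats a 6-byte buffer as a colon-separated MAC address and replaces the old per-byte print loops. A one-line sketch (the example_ name is made up):

#include <linux/kernel.h>
#include <linux/netdevice.h>

static void example_print_mac(struct net_device *netdev)
{
        /* prints e.g. "eth0: MAC address 00:11:22:33:44:55" */
        printk(KERN_INFO "%s: MAC address %pM\n",
               netdev->name, netdev->dev_addr);
}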
diff --git a/drivers/net/usb/mcs7830.c b/drivers/net/usb/mcs7830.c
index 87374317f48..6fc098fe9ff 100644
--- a/drivers/net/usb/mcs7830.c
+++ b/drivers/net/usb/mcs7830.c
@@ -1,13 +1,27 @@
1/* 1/*
2 * MosChips MCS7830 based USB 2.0 Ethernet Devices 2 * MOSCHIP MCS7830 based USB 2.0 Ethernet Devices
3 * 3 *
4 * based on usbnet.c, asix.c and the vendor provided mcs7830 driver 4 * based on usbnet.c, asix.c and the vendor provided mcs7830 driver
5 * 5 *
6 * Copyright (C) 2010 Andreas Mohr <andi@lisas.de>
6 * Copyright (C) 2006 Arnd Bergmann <arnd@arndb.de> 7 * Copyright (C) 2006 Arnd Bergmann <arnd@arndb.de>
7 * Copyright (C) 2003-2005 David Hollis <dhollis@davehollis.com> 8 * Copyright (C) 2003-2005 David Hollis <dhollis@davehollis.com>
8 * Copyright (C) 2005 Phil Chang <pchang23@sbcglobal.net> 9 * Copyright (C) 2005 Phil Chang <pchang23@sbcglobal.net>
9 * Copyright (c) 2002-2003 TiVo Inc. 10 * Copyright (c) 2002-2003 TiVo Inc.
10 * 11 *
12 * Definitions gathered from MOSCHIP, Data Sheet_7830DA.pdf (thanks!).
13 *
14 * TODO:
15 * - support HIF_REG_CONFIG_SLEEPMODE/HIF_REG_CONFIG_TXENABLE (via autopm?)
16 * - implement ethtool_ops get_pauseparam/set_pauseparam
17 * via HIF_REG_PAUSE_THRESHOLD (>= revision C only!)
18 * - implement get_eeprom/[set_eeprom]
19 * - switch PHY on/off on ifup/ifdown (perhaps in usbnet.c, via MII)
20 * - mcs7830_get_regs() handling is weird: for rev 2 we return 32 regs,
21 * can access only ~ 24, remaining user buffer is uninitialized garbage
22 * - anything else?
23 *
24 *
11 * This program is free software; you can redistribute it and/or modify 25 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License as published by 26 * it under the terms of the GNU General Public License as published by
13 * the Free Software Foundation; either version 2 of the License, or 27 * the Free Software Foundation; either version 2 of the License, or
@@ -55,7 +69,7 @@
55 ADVERTISE_100HALF | ADVERTISE_10FULL | \ 69 ADVERTISE_100HALF | ADVERTISE_10FULL | \
56 ADVERTISE_10HALF | ADVERTISE_CSMA) 70 ADVERTISE_10HALF | ADVERTISE_CSMA)
57 71
58/* HIF_REG_XX coressponding index value */ 72/* HIF_REG_XX corresponding index value */
59enum { 73enum {
60 HIF_REG_MULTICAST_HASH = 0x00, 74 HIF_REG_MULTICAST_HASH = 0x00,
61 HIF_REG_PACKET_GAP1 = 0x08, 75 HIF_REG_PACKET_GAP1 = 0x08,
@@ -69,6 +83,7 @@ enum {
69 HIF_REG_PHY_CMD2_PEND_FLAG_BIT = 0x80, 83 HIF_REG_PHY_CMD2_PEND_FLAG_BIT = 0x80,
70 HIF_REG_PHY_CMD2_READY_FLAG_BIT = 0x40, 84 HIF_REG_PHY_CMD2_READY_FLAG_BIT = 0x40,
71 HIF_REG_CONFIG = 0x0e, 85 HIF_REG_CONFIG = 0x0e,
86 /* hmm, spec sez: "R/W", "Except bit 3" (likely TXENABLE). */
72 HIF_REG_CONFIG_CFG = 0x80, 87 HIF_REG_CONFIG_CFG = 0x80,
73 HIF_REG_CONFIG_SPEED100 = 0x40, 88 HIF_REG_CONFIG_SPEED100 = 0x40,
74 HIF_REG_CONFIG_FULLDUPLEX_ENABLE = 0x20, 89 HIF_REG_CONFIG_FULLDUPLEX_ENABLE = 0x20,
@@ -76,13 +91,24 @@ enum {
76 HIF_REG_CONFIG_TXENABLE = 0x08, 91 HIF_REG_CONFIG_TXENABLE = 0x08,
77 HIF_REG_CONFIG_SLEEPMODE = 0x04, 92 HIF_REG_CONFIG_SLEEPMODE = 0x04,
78 HIF_REG_CONFIG_ALLMULTICAST = 0x02, 93 HIF_REG_CONFIG_ALLMULTICAST = 0x02,
79 HIF_REG_CONFIG_PROMISCIOUS = 0x01, 94 HIF_REG_CONFIG_PROMISCUOUS = 0x01,
80 HIF_REG_ETHERNET_ADDR = 0x0f, 95 HIF_REG_ETHERNET_ADDR = 0x0f,
81 HIF_REG_22 = 0x15, 96 HIF_REG_FRAME_DROP_COUNTER = 0x15, /* 0..ff; reset: 0 */
82 HIF_REG_PAUSE_THRESHOLD = 0x16, 97 HIF_REG_PAUSE_THRESHOLD = 0x16,
83 HIF_REG_PAUSE_THRESHOLD_DEFAULT = 0, 98 HIF_REG_PAUSE_THRESHOLD_DEFAULT = 0,
84}; 99};
85 100
101/* Trailing status byte in Ethernet Rx frame */
102enum {
103 MCS7830_RX_SHORT_FRAME = 0x01, /* < 64 bytes */
104 MCS7830_RX_LENGTH_ERROR = 0x02, /* framelen != Ethernet length field */
105 MCS7830_RX_ALIGNMENT_ERROR = 0x04, /* non-even number of nibbles */
106 MCS7830_RX_CRC_ERROR = 0x08,
107 MCS7830_RX_LARGE_FRAME = 0x10, /* > 1518 bytes */
108 MCS7830_RX_FRAME_CORRECT = 0x20, /* frame is correct */
109 /* [7:6] reserved */
110};
111
86struct mcs7830_data { 112struct mcs7830_data {
87 u8 multi_filter[8]; 113 u8 multi_filter[8];
88 u8 config; 114 u8 config;
@@ -109,7 +135,7 @@ static int mcs7830_get_reg(struct usbnet *dev, u16 index, u16 size, void *data)
109 return ret; 135 return ret;
110} 136}
111 137
112static int mcs7830_set_reg(struct usbnet *dev, u16 index, u16 size, void *data) 138static int mcs7830_set_reg(struct usbnet *dev, u16 index, u16 size, const void *data)
113{ 139{
114 struct usb_device *xdev = dev->udev; 140 struct usb_device *xdev = dev->udev;
115 int ret; 141 int ret;
@@ -183,13 +209,43 @@ out:
183 usb_free_urb(urb); 209 usb_free_urb(urb);
184} 210}
185 211
186static int mcs7830_get_address(struct usbnet *dev) 212static int mcs7830_hif_get_mac_address(struct usbnet *dev, unsigned char *addr)
213{
214 int ret = mcs7830_get_reg(dev, HIF_REG_ETHERNET_ADDR, ETH_ALEN, addr);
215 if (ret < 0)
216 return ret;
217 return 0;
218}
219
220static int mcs7830_hif_set_mac_address(struct usbnet *dev, unsigned char *addr)
221{
222 int ret = mcs7830_set_reg(dev, HIF_REG_ETHERNET_ADDR, ETH_ALEN, addr);
223
224 if (ret < 0)
225 return ret;
226 return 0;
227}
228
229static int mcs7830_set_mac_address(struct net_device *netdev, void *p)
187{ 230{
188 int ret; 231 int ret;
189 ret = mcs7830_get_reg(dev, HIF_REG_ETHERNET_ADDR, ETH_ALEN, 232 struct usbnet *dev = netdev_priv(netdev);
190 dev->net->dev_addr); 233 struct sockaddr *addr = p;
234
235 if (netif_running(netdev))
236 return -EBUSY;
237
238 if (!is_valid_ether_addr(addr->sa_data))
239 return -EINVAL;
240
241 ret = mcs7830_hif_set_mac_address(dev, addr->sa_data);
242
191 if (ret < 0) 243 if (ret < 0)
192 return ret; 244 return ret;
245
246 /* it worked --> adopt it on netdev side */
247 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
248
193 return 0; 249 return 0;
194} 250}
195 251
@@ -307,7 +363,7 @@ static int mcs7830_get_rev(struct usbnet *dev)
307{ 363{
308 u8 dummy[2]; 364 u8 dummy[2];
309 int ret; 365 int ret;
310 ret = mcs7830_get_reg(dev, HIF_REG_22, 2, dummy); 366 ret = mcs7830_get_reg(dev, HIF_REG_FRAME_DROP_COUNTER, 2, dummy);
311 if (ret > 0) 367 if (ret > 0)
312 return 2; /* Rev C or later */ 368 return 2; /* Rev C or later */
313 return 1; /* earlier revision */ 369 return 1; /* earlier revision */
@@ -331,33 +387,6 @@ static void mcs7830_rev_C_fixup(struct usbnet *dev)
331 } 387 }
332} 388}
333 389
334static int mcs7830_init_dev(struct usbnet *dev)
335{
336 int ret;
337 int retry;
338
339 /* Read MAC address from EEPROM */
340 ret = -EINVAL;
341 for (retry = 0; retry < 5 && ret; retry++)
342 ret = mcs7830_get_address(dev);
343 if (ret) {
344 dev_warn(&dev->udev->dev, "Cannot read MAC address\n");
345 goto out;
346 }
347
348 /* Set up PHY */
349 ret = mcs7830_set_autoneg(dev, 0);
350 if (ret) {
351 dev_info(&dev->udev->dev, "Cannot set autoneg\n");
352 goto out;
353 }
354
355 mcs7830_rev_C_fixup(dev);
356 ret = 0;
357out:
358 return ret;
359}
360
361static int mcs7830_mdio_read(struct net_device *netdev, int phy_id, 390static int mcs7830_mdio_read(struct net_device *netdev, int phy_id,
362 int location) 391 int location)
363{ 392{
@@ -378,11 +407,33 @@ static int mcs7830_ioctl(struct net_device *net, struct ifreq *rq, int cmd)
378 return generic_mii_ioctl(&dev->mii, if_mii(rq), cmd, NULL); 407 return generic_mii_ioctl(&dev->mii, if_mii(rq), cmd, NULL);
379} 408}
380 409
381/* credits go to asix_set_multicast */ 410static inline struct mcs7830_data *mcs7830_get_data(struct usbnet *dev)
382static void mcs7830_set_multicast(struct net_device *net) 411{
412 return (struct mcs7830_data *)&dev->data;
413}
414
415static void mcs7830_hif_update_multicast_hash(struct usbnet *dev)
416{
417 struct mcs7830_data *data = mcs7830_get_data(dev);
418 mcs7830_set_reg_async(dev, HIF_REG_MULTICAST_HASH,
419 sizeof data->multi_filter,
420 data->multi_filter);
421}
422
423static void mcs7830_hif_update_config(struct usbnet *dev)
424{
425 /* implementation specific to data->config
426 (argument needs to be heap-based anyway - USB DMA!) */
427 struct mcs7830_data *data = mcs7830_get_data(dev);
428 mcs7830_set_reg_async(dev, HIF_REG_CONFIG, 1, &data->config);
429}
430
431static void mcs7830_data_set_multicast(struct net_device *net)
383{ 432{
384 struct usbnet *dev = netdev_priv(net); 433 struct usbnet *dev = netdev_priv(net);
385 struct mcs7830_data *data = (struct mcs7830_data *)&dev->data; 434 struct mcs7830_data *data = mcs7830_get_data(dev);
435
436 memset(data->multi_filter, 0, sizeof data->multi_filter);
386 437
387 data->config = HIF_REG_CONFIG_TXENABLE; 438 data->config = HIF_REG_CONFIG_TXENABLE;
388 439
@@ -390,7 +441,7 @@ static void mcs7830_set_multicast(struct net_device *net)
390 data->config |= HIF_REG_CONFIG_ALLMULTICAST; 441 data->config |= HIF_REG_CONFIG_ALLMULTICAST;
391 442
392 if (net->flags & IFF_PROMISC) { 443 if (net->flags & IFF_PROMISC) {
393 data->config |= HIF_REG_CONFIG_PROMISCIOUS; 444 data->config |= HIF_REG_CONFIG_PROMISCUOUS;
394 } else if (net->flags & IFF_ALLMULTI || 445 } else if (net->flags & IFF_ALLMULTI ||
395 net->mc_count > MCS7830_MAX_MCAST) { 446 net->mc_count > MCS7830_MAX_MCAST) {
396 data->config |= HIF_REG_CONFIG_ALLMULTICAST; 447 data->config |= HIF_REG_CONFIG_ALLMULTICAST;
@@ -405,21 +456,51 @@ static void mcs7830_set_multicast(struct net_device *net)
405 u32 crc_bits; 456 u32 crc_bits;
406 int i; 457 int i;
407 458
408 memset(data->multi_filter, 0, sizeof data->multi_filter);
409
410 /* Build the multicast hash filter. */ 459 /* Build the multicast hash filter. */
411 for (i = 0; i < net->mc_count; i++) { 460 for (i = 0; i < net->mc_count; i++) {
412 crc_bits = ether_crc(ETH_ALEN, mc_list->dmi_addr) >> 26; 461 crc_bits = ether_crc(ETH_ALEN, mc_list->dmi_addr) >> 26;
413 data->multi_filter[crc_bits >> 3] |= 1 << (crc_bits & 7); 462 data->multi_filter[crc_bits >> 3] |= 1 << (crc_bits & 7);
414 mc_list = mc_list->next; 463 mc_list = mc_list->next;
415 } 464 }
465 }
466}
416 467
417 mcs7830_set_reg_async(dev, HIF_REG_MULTICAST_HASH, 468static int mcs7830_apply_base_config(struct usbnet *dev)
418 sizeof data->multi_filter, 469{
419 data->multi_filter); 470 int ret;
471
472 /* re-configure known MAC (suspend case etc.) */
473 ret = mcs7830_hif_set_mac_address(dev, dev->net->dev_addr);
474 if (ret) {
475 dev_info(&dev->udev->dev, "Cannot set MAC address\n");
476 goto out;
420 } 477 }
421 478
422 mcs7830_set_reg_async(dev, HIF_REG_CONFIG, 1, &data->config); 479 /* Set up PHY */
480 ret = mcs7830_set_autoneg(dev, 0);
481 if (ret) {
482 dev_info(&dev->udev->dev, "Cannot set autoneg\n");
483 goto out;
484 }
485
486 mcs7830_hif_update_multicast_hash(dev);
487 mcs7830_hif_update_config(dev);
488
489 mcs7830_rev_C_fixup(dev);
490 ret = 0;
491out:
492 return ret;
493}
494
495/* credits go to asix_set_multicast */
496static void mcs7830_set_multicast(struct net_device *net)
497{
498 struct usbnet *dev = netdev_priv(net);
499
500 mcs7830_data_set_multicast(net);
501
502 mcs7830_hif_update_multicast_hash(dev);
503 mcs7830_hif_update_config(dev);
423} 504}
424 505
425static int mcs7830_get_regs_len(struct net_device *net) 506static int mcs7830_get_regs_len(struct net_device *net)
@@ -463,29 +544,6 @@ static const struct ethtool_ops mcs7830_ethtool_ops = {
463 .nway_reset = usbnet_nway_reset, 544 .nway_reset = usbnet_nway_reset,
464}; 545};
465 546
466static int mcs7830_set_mac_address(struct net_device *netdev, void *p)
467{
468 int ret;
469 struct usbnet *dev = netdev_priv(netdev);
470 struct sockaddr *addr = p;
471
472 if (netif_running(netdev))
473 return -EBUSY;
474
475 if (!is_valid_ether_addr(addr->sa_data))
476 return -EINVAL;
477
478 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
479
480 ret = mcs7830_set_reg(dev, HIF_REG_ETHERNET_ADDR, ETH_ALEN,
481 netdev->dev_addr);
482
483 if (ret < 0)
484 return ret;
485
486 return 0;
487}
488
489static const struct net_device_ops mcs7830_netdev_ops = { 547static const struct net_device_ops mcs7830_netdev_ops = {
490 .ndo_open = usbnet_open, 548 .ndo_open = usbnet_open,
491 .ndo_stop = usbnet_stop, 549 .ndo_stop = usbnet_stop,
@@ -495,21 +553,32 @@ static const struct net_device_ops mcs7830_netdev_ops = {
495 .ndo_validate_addr = eth_validate_addr, 553 .ndo_validate_addr = eth_validate_addr,
496 .ndo_do_ioctl = mcs7830_ioctl, 554 .ndo_do_ioctl = mcs7830_ioctl,
497 .ndo_set_multicast_list = mcs7830_set_multicast, 555 .ndo_set_multicast_list = mcs7830_set_multicast,
498 .ndo_set_mac_address = mcs7830_set_mac_address, 556 .ndo_set_mac_address = mcs7830_set_mac_address,
499}; 557};
500 558
501static int mcs7830_bind(struct usbnet *dev, struct usb_interface *udev) 559static int mcs7830_bind(struct usbnet *dev, struct usb_interface *udev)
502{ 560{
503 struct net_device *net = dev->net; 561 struct net_device *net = dev->net;
504 int ret; 562 int ret;
563 int retry;
505 564
506 ret = mcs7830_init_dev(dev); 565 /* Initial startup: Gather MAC address setting from EEPROM */
566 ret = -EINVAL;
567 for (retry = 0; retry < 5 && ret; retry++)
568 ret = mcs7830_hif_get_mac_address(dev, net->dev_addr);
569 if (ret) {
570 dev_warn(&dev->udev->dev, "Cannot read MAC address\n");
571 goto out;
572 }
573
574 mcs7830_data_set_multicast(net);
575
576 ret = mcs7830_apply_base_config(dev);
507 if (ret) 577 if (ret)
508 goto out; 578 goto out;
509 579
510 net->ethtool_ops = &mcs7830_ethtool_ops; 580 net->ethtool_ops = &mcs7830_ethtool_ops;
511 net->netdev_ops = &mcs7830_netdev_ops; 581 net->netdev_ops = &mcs7830_netdev_ops;
512 mcs7830_set_multicast(net);
513 582
514 /* reserve space for the status byte on rx */ 583 /* reserve space for the status byte on rx */
515 dev->rx_urb_size = ETH_FRAME_LEN + 1; 584 dev->rx_urb_size = ETH_FRAME_LEN + 1;
@@ -526,7 +595,7 @@ out:
526 return ret; 595 return ret;
527} 596}
528 597
529/* The chip always appends a status bytes that we need to strip */ 598/* The chip always appends a status byte that we need to strip */
530static int mcs7830_rx_fixup(struct usbnet *dev, struct sk_buff *skb) 599static int mcs7830_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
531{ 600{
532 u8 status; 601 u8 status;
@@ -539,9 +608,23 @@ static int mcs7830_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
539 skb_trim(skb, skb->len - 1); 608 skb_trim(skb, skb->len - 1);
540 status = skb->data[skb->len]; 609 status = skb->data[skb->len];
541 610
542 if (status != 0x20) 611 if (status != MCS7830_RX_FRAME_CORRECT) {
543 dev_dbg(&dev->udev->dev, "rx fixup status %x\n", status); 612 dev_dbg(&dev->udev->dev, "rx fixup status %x\n", status);
544 613
614 /* hmm, perhaps usbnet.c already sees a globally visible
615 frame error and increments rx_errors on its own already? */
616 dev->net->stats.rx_errors++;
617
618 if (status & (MCS7830_RX_SHORT_FRAME
619 |MCS7830_RX_LENGTH_ERROR
620 |MCS7830_RX_LARGE_FRAME))
621 dev->net->stats.rx_length_errors++;
622 if (status & MCS7830_RX_ALIGNMENT_ERROR)
623 dev->net->stats.rx_frame_errors++;
624 if (status & MCS7830_RX_CRC_ERROR)
625 dev->net->stats.rx_crc_errors++;
626 }
627
545 return skb->len > 0; 628 return skb->len > 0;
546} 629}
547 630
@@ -580,6 +663,20 @@ static const struct usb_device_id products[] = {
580}; 663};
581MODULE_DEVICE_TABLE(usb, products); 664MODULE_DEVICE_TABLE(usb, products);
582 665
666static int mcs7830_reset_resume (struct usb_interface *intf)
667{
668 /* YES, this function is successful enough that ethtool -d
669 does show same output pre-/post-suspend */
670
671 struct usbnet *dev = usb_get_intfdata(intf);
672
673 mcs7830_apply_base_config(dev);
674
675 usbnet_resume(intf);
676
677 return 0;
678}
679
583static struct usb_driver mcs7830_driver = { 680static struct usb_driver mcs7830_driver = {
584 .name = driver_name, 681 .name = driver_name,
585 .id_table = products, 682 .id_table = products,
@@ -587,6 +684,7 @@ static struct usb_driver mcs7830_driver = {
587 .disconnect = usbnet_disconnect, 684 .disconnect = usbnet_disconnect,
588 .suspend = usbnet_suspend, 685 .suspend = usbnet_suspend,
589 .resume = usbnet_resume, 686 .resume = usbnet_resume,
687 .reset_resume = mcs7830_reset_resume,
590}; 688};
591 689
592static int __init mcs7830_init(void) 690static int __init mcs7830_init(void)
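The mcs7830 rx_fixup change above classifies the trailing status byte that the chip appends to every received frame. Restated outside the usbnet path, the status-to-counter mapping looks roughly like this; the EX_ constants mirror the enum values added in the diff and the function itself is hypothetical:

#include <linux/netdevice.h>
#include <linux/types.h>

#define EX_RX_SHORT_FRAME       0x01    /* < 64 bytes */
#define EX_RX_LENGTH_ERROR      0x02    /* length field mismatch */
#define EX_RX_ALIGNMENT_ERROR   0x04    /* odd number of nibbles */
#define EX_RX_CRC_ERROR         0x08
#define EX_RX_LARGE_FRAME       0x10    /* > 1518 bytes */
#define EX_RX_FRAME_CORRECT     0x20

static void example_count_rx_status(struct net_device *net, u8 status)
{
        if (status == EX_RX_FRAME_CORRECT)
                return;

        net->stats.rx_errors++;
        if (status & (EX_RX_SHORT_FRAME | EX_RX_LENGTH_ERROR |
                      EX_RX_LARGE_FRAME))
                net->stats.rx_length_errors++;
        if (status & EX_RX_ALIGNMENT_ERROR)
                net->stats.rx_frame_errors++;
        if (status & EX_RX_CRC_ERROR)
                net->stats.rx_crc_errors++;
}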
diff --git a/drivers/net/usb/rtl8150.c b/drivers/net/usb/rtl8150.c
index fd19db0d250..21ac103fbb7 100644
--- a/drivers/net/usb/rtl8150.c
+++ b/drivers/net/usb/rtl8150.c
@@ -313,20 +313,17 @@ static int rtl8150_set_mac_address(struct net_device *netdev, void *p)
313{ 313{
314 struct sockaddr *addr = p; 314 struct sockaddr *addr = p;
315 rtl8150_t *dev = netdev_priv(netdev); 315 rtl8150_t *dev = netdev_priv(netdev);
316 int i;
317 316
318 if (netif_running(netdev)) 317 if (netif_running(netdev))
319 return -EBUSY; 318 return -EBUSY;
320 319
321 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); 320 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
322 dbg("%s: Setting MAC address to ", netdev->name); 321 dbg("%s: Setting MAC address to %pM\n", netdev->name, netdev->dev_addr);
323 for (i = 0; i < 5; i++)
324 dbg("%02X:", netdev->dev_addr[i]);
325 dbg("%02X\n", netdev->dev_addr[i]);
326 /* Set the IDR registers. */ 322 /* Set the IDR registers. */
327 set_registers(dev, IDR, netdev->addr_len, netdev->dev_addr); 323 set_registers(dev, IDR, netdev->addr_len, netdev->dev_addr);
328#ifdef EEPROM_WRITE 324#ifdef EEPROM_WRITE
329 { 325 {
326 int i;
330 u8 cr; 327 u8 cr;
331 /* Get the CR contents. */ 328 /* Get the CR contents. */
332 get_registers(dev, CR, 1, &cr); 329 get_registers(dev, CR, 1, &cr);
diff --git a/drivers/net/via-rhine.c b/drivers/net/via-rhine.c
index 611b8043595..a7e0c84426e 100644
--- a/drivers/net/via-rhine.c
+++ b/drivers/net/via-rhine.c
@@ -267,7 +267,7 @@ enum rhine_quirks {
267/* Beware of PCI posted writes */ 267/* Beware of PCI posted writes */
268#define IOSYNC do { ioread8(ioaddr + StationAddr); } while (0) 268#define IOSYNC do { ioread8(ioaddr + StationAddr); } while (0)
269 269
270static const struct pci_device_id rhine_pci_tbl[] = { 270static DEFINE_PCI_DEVICE_TABLE(rhine_pci_tbl) = {
271 { 0x1106, 0x3043, PCI_ANY_ID, PCI_ANY_ID, }, /* VT86C100A */ 271 { 0x1106, 0x3043, PCI_ANY_ID, PCI_ANY_ID, }, /* VT86C100A */
272 { 0x1106, 0x3065, PCI_ANY_ID, PCI_ANY_ID, }, /* VT6102 */ 272 { 0x1106, 0x3065, PCI_ANY_ID, PCI_ANY_ID, }, /* VT6102 */
273 { 0x1106, 0x3106, PCI_ANY_ID, PCI_ANY_ID, }, /* 6105{,L,LOM} */ 273 { 0x1106, 0x3106, PCI_ANY_ID, PCI_ANY_ID, }, /* 6105{,L,LOM} */
diff --git a/drivers/net/via-velocity.c b/drivers/net/via-velocity.c
index c93f58f5c6f..f15485efe40 100644
--- a/drivers/net/via-velocity.c
+++ b/drivers/net/via-velocity.c
@@ -361,7 +361,7 @@ static struct velocity_info_tbl chip_info_table[] = {
361 * Describe the PCI device identifiers that we support in this 361 * Describe the PCI device identifiers that we support in this
362 * device driver. Used for hotplug autoloading. 362 * device driver. Used for hotplug autoloading.
363 */ 363 */
364static const struct pci_device_id velocity_id_table[] __devinitdata = { 364static DEFINE_PCI_DEVICE_TABLE(velocity_id_table) = {
365 { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_612X) }, 365 { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_612X) },
366 { } 366 { }
367}; 367};
@@ -2702,10 +2702,8 @@ static void __devinit velocity_print_info(struct velocity_info *vptr)
2702 struct net_device *dev = vptr->dev; 2702 struct net_device *dev = vptr->dev;
2703 2703
2704 printk(KERN_INFO "%s: %s\n", dev->name, get_chip_name(vptr->chip_id)); 2704 printk(KERN_INFO "%s: %s\n", dev->name, get_chip_name(vptr->chip_id));
2705 printk(KERN_INFO "%s: Ethernet Address: %2.2X:%2.2X:%2.2X:%2.2X:%2.2X:%2.2X\n", 2705 printk(KERN_INFO "%s: Ethernet Address: %pM\n",
2706 dev->name, 2706 dev->name, dev->dev_addr);
2707 dev->dev_addr[0], dev->dev_addr[1], dev->dev_addr[2],
2708 dev->dev_addr[3], dev->dev_addr[4], dev->dev_addr[5]);
2709} 2707}
2710 2708
2711static u32 velocity_get_link(struct net_device *dev) 2709static u32 velocity_get_link(struct net_device *dev)
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 9ead30bd00c..9d8984a3741 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -56,8 +56,7 @@ struct virtnet_info
56 /* Host will merge rx buffers for big packets (shake it! shake it!) */ 56 /* Host will merge rx buffers for big packets (shake it! shake it!) */
57 bool mergeable_rx_bufs; 57 bool mergeable_rx_bufs;
58 58
59 /* Receive & send queues. */ 59 /* Send queue. */
60 struct sk_buff_head recv;
61 struct sk_buff_head send; 60 struct sk_buff_head send;
62 61
63 /* Work struct for refilling if we run low on memory. */ 62 /* Work struct for refilling if we run low on memory. */
@@ -75,34 +74,44 @@ struct skb_vnet_hdr {
75 unsigned int num_sg; 74 unsigned int num_sg;
76}; 75};
77 76
77struct padded_vnet_hdr {
78 struct virtio_net_hdr hdr;
79 /*
80 * virtio_net_hdr should be in a separated sg buffer because of a
81 * QEMU bug, and data sg buffer shares same page with this header sg.
82 * This padding makes next sg 16 byte aligned after virtio_net_hdr.
83 */
84 char padding[6];
85};
86
78static inline struct skb_vnet_hdr *skb_vnet_hdr(struct sk_buff *skb) 87static inline struct skb_vnet_hdr *skb_vnet_hdr(struct sk_buff *skb)
79{ 88{
80 return (struct skb_vnet_hdr *)skb->cb; 89 return (struct skb_vnet_hdr *)skb->cb;
81} 90}
82 91
83static void give_a_page(struct virtnet_info *vi, struct page *page) 92/*
84{ 93 * private is used to chain pages for big packets, put the whole
85 page->private = (unsigned long)vi->pages; 94 * most recent used list in the beginning for reuse
86 vi->pages = page; 95 */
87} 96static void give_pages(struct virtnet_info *vi, struct page *page)
88
89static void trim_pages(struct virtnet_info *vi, struct sk_buff *skb)
90{ 97{
91 unsigned int i; 98 struct page *end;
92 99
93 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) 100 /* Find end of list, sew whole thing into vi->pages. */
94 give_a_page(vi, skb_shinfo(skb)->frags[i].page); 101 for (end = page; end->private; end = (struct page *)end->private);
95 skb_shinfo(skb)->nr_frags = 0; 102 end->private = (unsigned long)vi->pages;
96 skb->data_len = 0; 103 vi->pages = page;
97} 104}
98 105
99static struct page *get_a_page(struct virtnet_info *vi, gfp_t gfp_mask) 106static struct page *get_a_page(struct virtnet_info *vi, gfp_t gfp_mask)
100{ 107{
101 struct page *p = vi->pages; 108 struct page *p = vi->pages;
102 109
103 if (p) 110 if (p) {
104 vi->pages = (struct page *)p->private; 111 vi->pages = (struct page *)p->private;
105 else 112 /* clear private here, it is used to chain pages */
113 p->private = 0;
114 } else
106 p = alloc_page(gfp_mask); 115 p = alloc_page(gfp_mask);
107 return p; 116 return p;
108} 117}
@@ -118,99 +127,142 @@ static void skb_xmit_done(struct virtqueue *svq)
118 netif_wake_queue(vi->dev); 127 netif_wake_queue(vi->dev);
119} 128}
120 129
121static void receive_skb(struct net_device *dev, struct sk_buff *skb, 130static void set_skb_frag(struct sk_buff *skb, struct page *page,
122 unsigned len) 131 unsigned int offset, unsigned int *len)
123{ 132{
124 struct virtnet_info *vi = netdev_priv(dev); 133 int i = skb_shinfo(skb)->nr_frags;
125 struct skb_vnet_hdr *hdr = skb_vnet_hdr(skb); 134 skb_frag_t *f;
126 int err; 135
127 int i; 136 f = &skb_shinfo(skb)->frags[i];
128 137 f->size = min((unsigned)PAGE_SIZE - offset, *len);
129 if (unlikely(len < sizeof(struct virtio_net_hdr) + ETH_HLEN)) { 138 f->page_offset = offset;
130 pr_debug("%s: short packet %i\n", dev->name, len); 139 f->page = page;
131 dev->stats.rx_length_errors++; 140
132 goto drop; 141 skb->data_len += f->size;
133 } 142 skb->len += f->size;
143 skb_shinfo(skb)->nr_frags++;
144 *len -= f->size;
145}
134 146
135 if (vi->mergeable_rx_bufs) { 147static struct sk_buff *page_to_skb(struct virtnet_info *vi,
136 unsigned int copy; 148 struct page *page, unsigned int len)
137 char *p = page_address(skb_shinfo(skb)->frags[0].page); 149{
150 struct sk_buff *skb;
151 struct skb_vnet_hdr *hdr;
152 unsigned int copy, hdr_len, offset;
153 char *p;
138 154
139 if (len > PAGE_SIZE) 155 p = page_address(page);
140 len = PAGE_SIZE;
141 len -= sizeof(struct virtio_net_hdr_mrg_rxbuf);
142 156
143 memcpy(&hdr->mhdr, p, sizeof(hdr->mhdr)); 157 /* copy small packet so we can reuse these pages for small data */
144 p += sizeof(hdr->mhdr); 158 skb = netdev_alloc_skb_ip_align(vi->dev, GOOD_COPY_LEN);
159 if (unlikely(!skb))
160 return NULL;
145 161
146 copy = len; 162 hdr = skb_vnet_hdr(skb);
147 if (copy > skb_tailroom(skb))
148 copy = skb_tailroom(skb);
149 163
150 memcpy(skb_put(skb, copy), p, copy); 164 if (vi->mergeable_rx_bufs) {
165 hdr_len = sizeof hdr->mhdr;
166 offset = hdr_len;
167 } else {
168 hdr_len = sizeof hdr->hdr;
169 offset = sizeof(struct padded_vnet_hdr);
170 }
151 171
152 len -= copy; 172 memcpy(hdr, p, hdr_len);
153 173
154 if (!len) { 174 len -= hdr_len;
155 give_a_page(vi, skb_shinfo(skb)->frags[0].page); 175 p += offset;
156 skb_shinfo(skb)->nr_frags--;
157 } else {
158 skb_shinfo(skb)->frags[0].page_offset +=
159 sizeof(hdr->mhdr) + copy;
160 skb_shinfo(skb)->frags[0].size = len;
161 skb->data_len += len;
162 skb->len += len;
163 }
164 176
165 while (--hdr->mhdr.num_buffers) { 177 copy = len;
166 struct sk_buff *nskb; 178 if (copy > skb_tailroom(skb))
179 copy = skb_tailroom(skb);
180 memcpy(skb_put(skb, copy), p, copy);
167 181
168 i = skb_shinfo(skb)->nr_frags; 182 len -= copy;
169 if (i >= MAX_SKB_FRAGS) { 183 offset += copy;
170 pr_debug("%s: packet too long %d\n", dev->name,
171 len);
172 dev->stats.rx_length_errors++;
173 goto drop;
174 }
175 184
176 nskb = vi->rvq->vq_ops->get_buf(vi->rvq, &len); 185 while (len) {
177 if (!nskb) { 186 set_skb_frag(skb, page, offset, &len);
178 pr_debug("%s: rx error: %d buffers missing\n", 187 page = (struct page *)page->private;
179 dev->name, hdr->mhdr.num_buffers); 188 offset = 0;
180 dev->stats.rx_length_errors++; 189 }
181 goto drop;
182 }
183 190
184 __skb_unlink(nskb, &vi->recv); 191 if (page)
185 vi->num--; 192 give_pages(vi, page);
186 193
187 skb_shinfo(skb)->frags[i] = skb_shinfo(nskb)->frags[0]; 194 return skb;
188 skb_shinfo(nskb)->nr_frags = 0; 195}
189 kfree_skb(nskb);
190 196
191 if (len > PAGE_SIZE) 197static int receive_mergeable(struct virtnet_info *vi, struct sk_buff *skb)
192 len = PAGE_SIZE; 198{
199 struct skb_vnet_hdr *hdr = skb_vnet_hdr(skb);
200 struct page *page;
201 int num_buf, i, len;
202
203 num_buf = hdr->mhdr.num_buffers;
204 while (--num_buf) {
205 i = skb_shinfo(skb)->nr_frags;
206 if (i >= MAX_SKB_FRAGS) {
207 pr_debug("%s: packet too long\n", skb->dev->name);
208 skb->dev->stats.rx_length_errors++;
209 return -EINVAL;
210 }
193 211
194 skb_shinfo(skb)->frags[i].size = len; 212 page = vi->rvq->vq_ops->get_buf(vi->rvq, &len);
195 skb_shinfo(skb)->nr_frags++; 213 if (!page) {
196 skb->data_len += len; 214 pr_debug("%s: rx error: %d buffers missing\n",
197 skb->len += len; 215 skb->dev->name, hdr->mhdr.num_buffers);
216 skb->dev->stats.rx_length_errors++;
217 return -EINVAL;
198 } 218 }
199 } else { 219 if (len > PAGE_SIZE)
200 len -= sizeof(hdr->hdr); 220 len = PAGE_SIZE;
221
222 set_skb_frag(skb, page, 0, &len);
223
224 --vi->num;
225 }
226 return 0;
227}
228
229static void receive_buf(struct net_device *dev, void *buf, unsigned int len)
230{
231 struct virtnet_info *vi = netdev_priv(dev);
232 struct sk_buff *skb;
233 struct page *page;
234 struct skb_vnet_hdr *hdr;
201 235
202 if (len <= MAX_PACKET_LEN) 236 if (unlikely(len < sizeof(struct virtio_net_hdr) + ETH_HLEN)) {
203 trim_pages(vi, skb); 237 pr_debug("%s: short packet %i\n", dev->name, len);
238 dev->stats.rx_length_errors++;
239 if (vi->mergeable_rx_bufs || vi->big_packets)
240 give_pages(vi, buf);
241 else
242 dev_kfree_skb(buf);
243 return;
244 }
204 245
205 err = pskb_trim(skb, len); 246 if (!vi->mergeable_rx_bufs && !vi->big_packets) {
206 if (err) { 247 skb = buf;
207 pr_debug("%s: pskb_trim failed %i %d\n", dev->name, 248 len -= sizeof(struct virtio_net_hdr);
208 len, err); 249 skb_trim(skb, len);
250 } else {
251 page = buf;
252 skb = page_to_skb(vi, page, len);
253 if (unlikely(!skb)) {
209 dev->stats.rx_dropped++; 254 dev->stats.rx_dropped++;
210 goto drop; 255 give_pages(vi, page);
256 return;
211 } 257 }
258 if (vi->mergeable_rx_bufs)
259 if (receive_mergeable(vi, skb)) {
260 dev_kfree_skb(skb);
261 return;
262 }
212 } 263 }
213 264
265 hdr = skb_vnet_hdr(skb);
214 skb->truesize += skb->data_len; 266 skb->truesize += skb->data_len;
215 dev->stats.rx_bytes += skb->len; 267 dev->stats.rx_bytes += skb->len;
216 dev->stats.rx_packets++; 268 dev->stats.rx_packets++;
@@ -267,110 +319,119 @@ static void receive_skb(struct net_device *dev, struct sk_buff *skb,
267 319
268frame_err: 320frame_err:
269 dev->stats.rx_frame_errors++; 321 dev->stats.rx_frame_errors++;
270drop:
271 dev_kfree_skb(skb); 322 dev_kfree_skb(skb);
272} 323}
273 324
274static bool try_fill_recv_maxbufs(struct virtnet_info *vi, gfp_t gfp) 325static int add_recvbuf_small(struct virtnet_info *vi, gfp_t gfp)
275{ 326{
276 struct sk_buff *skb; 327 struct sk_buff *skb;
277 struct scatterlist sg[2+MAX_SKB_FRAGS]; 328 struct skb_vnet_hdr *hdr;
278 int num, err, i; 329 struct scatterlist sg[2];
279 bool oom = false; 330 int err;
280
281 sg_init_table(sg, 2+MAX_SKB_FRAGS);
282 do {
283 struct skb_vnet_hdr *hdr;
284 331
285 skb = netdev_alloc_skb_ip_align(vi->dev, MAX_PACKET_LEN); 332 skb = netdev_alloc_skb_ip_align(vi->dev, MAX_PACKET_LEN);
286 if (unlikely(!skb)) { 333 if (unlikely(!skb))
287 oom = true; 334 return -ENOMEM;
288 break;
289 }
290 335
291 skb_put(skb, MAX_PACKET_LEN); 336 skb_put(skb, MAX_PACKET_LEN);
292 337
293 hdr = skb_vnet_hdr(skb); 338 hdr = skb_vnet_hdr(skb);
294 sg_set_buf(sg, &hdr->hdr, sizeof(hdr->hdr)); 339 sg_set_buf(sg, &hdr->hdr, sizeof hdr->hdr);
295 340
296 if (vi->big_packets) { 341 skb_to_sgvec(skb, sg + 1, 0, skb->len);
297 for (i = 0; i < MAX_SKB_FRAGS; i++) {
298 skb_frag_t *f = &skb_shinfo(skb)->frags[i];
299 f->page = get_a_page(vi, gfp);
300 if (!f->page)
301 break;
302 342
303 f->page_offset = 0; 343 err = vi->rvq->vq_ops->add_buf(vi->rvq, sg, 0, 2, skb);
304 f->size = PAGE_SIZE; 344 if (err < 0)
345 dev_kfree_skb(skb);
305 346
306 skb->data_len += PAGE_SIZE; 347 return err;
307 skb->len += PAGE_SIZE; 348}
308 349
309 skb_shinfo(skb)->nr_frags++; 350static int add_recvbuf_big(struct virtnet_info *vi, gfp_t gfp)
310 } 351{
352 struct scatterlist sg[MAX_SKB_FRAGS + 2];
353 struct page *first, *list = NULL;
354 char *p;
355 int i, err, offset;
356
357 /* page in sg[MAX_SKB_FRAGS + 1] is list tail */
358 for (i = MAX_SKB_FRAGS + 1; i > 1; --i) {
359 first = get_a_page(vi, gfp);
360 if (!first) {
361 if (list)
362 give_pages(vi, list);
363 return -ENOMEM;
311 } 364 }
365 sg_set_buf(&sg[i], page_address(first), PAGE_SIZE);
312 366
313 num = skb_to_sgvec(skb, sg+1, 0, skb->len) + 1; 367 /* chain new page in list head to match sg */
314 skb_queue_head(&vi->recv, skb); 368 first->private = (unsigned long)list;
369 list = first;
370 }
315 371
316 err = vi->rvq->vq_ops->add_buf(vi->rvq, sg, 0, num, skb); 372 first = get_a_page(vi, gfp);
317 if (err < 0) { 373 if (!first) {
318 skb_unlink(skb, &vi->recv); 374 give_pages(vi, list);
319 trim_pages(vi, skb); 375 return -ENOMEM;
320 kfree_skb(skb); 376 }
321 break; 377 p = page_address(first);
322 } 378
323 vi->num++; 379 /* sg[0], sg[1] share the same page */
324 } while (err >= num); 380 /* a separated sg[0] for virtio_net_hdr only during to QEMU bug*/
325 if (unlikely(vi->num > vi->max)) 381 sg_set_buf(&sg[0], p, sizeof(struct virtio_net_hdr));
326 vi->max = vi->num; 382
327 vi->rvq->vq_ops->kick(vi->rvq); 383 /* sg[1] for data packet, from offset */
328 return !oom; 384 offset = sizeof(struct padded_vnet_hdr);
385 sg_set_buf(&sg[1], p + offset, PAGE_SIZE - offset);
386
387 /* chain first in list head */
388 first->private = (unsigned long)list;
389 err = vi->rvq->vq_ops->add_buf(vi->rvq, sg, 0, MAX_SKB_FRAGS + 2,
390 first);
391 if (err < 0)
392 give_pages(vi, first);
393
394 return err;
329} 395}
330 396
331/* Returns false if we couldn't fill entirely (OOM). */ 397static int add_recvbuf_mergeable(struct virtnet_info *vi, gfp_t gfp)
332static bool try_fill_recv(struct virtnet_info *vi, gfp_t gfp)
333{ 398{
334 struct sk_buff *skb; 399 struct page *page;
335 struct scatterlist sg[1]; 400 struct scatterlist sg;
336 int err; 401 int err;
337 bool oom = false;
338
339 if (!vi->mergeable_rx_bufs)
340 return try_fill_recv_maxbufs(vi, gfp);
341 402
342 do { 403 page = get_a_page(vi, gfp);
343 skb_frag_t *f; 404 if (!page)
405 return -ENOMEM;
344 406
345 skb = netdev_alloc_skb_ip_align(vi->dev, GOOD_COPY_LEN); 407 sg_init_one(&sg, page_address(page), PAGE_SIZE);
346 if (unlikely(!skb)) {
347 oom = true;
348 break;
349 }
350 408
351 f = &skb_shinfo(skb)->frags[0]; 409 err = vi->rvq->vq_ops->add_buf(vi->rvq, &sg, 0, 1, page);
352 f->page = get_a_page(vi, gfp); 410 if (err < 0)
353 if (!f->page) { 411 give_pages(vi, page);
354 oom = true;
355 kfree_skb(skb);
356 break;
357 }
358 412
359 f->page_offset = 0; 413 return err;
360 f->size = PAGE_SIZE; 414}
361 415
362 skb_shinfo(skb)->nr_frags++; 416/* Returns false if we couldn't fill entirely (OOM). */
417static bool try_fill_recv(struct virtnet_info *vi, gfp_t gfp)
418{
419 int err;
420 bool oom = false;
363 421
364 sg_init_one(sg, page_address(f->page), PAGE_SIZE); 422 do {
365 skb_queue_head(&vi->recv, skb); 423 if (vi->mergeable_rx_bufs)
424 err = add_recvbuf_mergeable(vi, gfp);
425 else if (vi->big_packets)
426 err = add_recvbuf_big(vi, gfp);
427 else
428 err = add_recvbuf_small(vi, gfp);
366 429
367 err = vi->rvq->vq_ops->add_buf(vi->rvq, sg, 0, 1, skb);
368 if (err < 0) { 430 if (err < 0) {
369 skb_unlink(skb, &vi->recv); 431 oom = true;
370 kfree_skb(skb);
371 break; 432 break;
372 } 433 }
373 vi->num++; 434 ++vi->num;
374 } while (err > 0); 435 } while (err > 0);
375 if (unlikely(vi->num > vi->max)) 436 if (unlikely(vi->num > vi->max))
376 vi->max = vi->num; 437 vi->max = vi->num;
@@ -407,15 +468,14 @@ static void refill_work(struct work_struct *work)
407static int virtnet_poll(struct napi_struct *napi, int budget) 468static int virtnet_poll(struct napi_struct *napi, int budget)
408{ 469{
409 struct virtnet_info *vi = container_of(napi, struct virtnet_info, napi); 470 struct virtnet_info *vi = container_of(napi, struct virtnet_info, napi);
410 struct sk_buff *skb = NULL; 471 void *buf;
411 unsigned int len, received = 0; 472 unsigned int len, received = 0;
412 473
413again: 474again:
414 while (received < budget && 475 while (received < budget &&
415 (skb = vi->rvq->vq_ops->get_buf(vi->rvq, &len)) != NULL) { 476 (buf = vi->rvq->vq_ops->get_buf(vi->rvq, &len)) != NULL) {
416 __skb_unlink(skb, &vi->recv); 477 receive_buf(vi->dev, buf, len);
417 receive_skb(vi->dev, skb, len); 478 --vi->num;
418 vi->num--;
419 received++; 479 received++;
420 } 480 }
421 481
@@ -495,9 +555,9 @@ static int xmit_skb(struct virtnet_info *vi, struct sk_buff *skb)
495 555
496 /* Encode metadata header at front. */ 556 /* Encode metadata header at front. */
497 if (vi->mergeable_rx_bufs) 557 if (vi->mergeable_rx_bufs)
498 sg_set_buf(sg, &hdr->mhdr, sizeof(hdr->mhdr)); 558 sg_set_buf(sg, &hdr->mhdr, sizeof hdr->mhdr);
499 else 559 else
500 sg_set_buf(sg, &hdr->hdr, sizeof(hdr->hdr)); 560 sg_set_buf(sg, &hdr->hdr, sizeof hdr->hdr);
501 561
502 hdr->num_sg = skb_to_sgvec(skb, sg+1, 0, skb->len) + 1; 562 hdr->num_sg = skb_to_sgvec(skb, sg+1, 0, skb->len) + 1;
503 return vi->svq->vq_ops->add_buf(vi->svq, sg, hdr->num_sg, 0, skb); 563 return vi->svq->vq_ops->add_buf(vi->svq, sg, hdr->num_sg, 0, skb);
@@ -674,6 +734,7 @@ static void virtnet_set_rx_mode(struct net_device *dev)
674 struct virtio_net_ctrl_mac *mac_data; 734 struct virtio_net_ctrl_mac *mac_data;
675 struct dev_addr_list *addr; 735 struct dev_addr_list *addr;
676 struct netdev_hw_addr *ha; 736 struct netdev_hw_addr *ha;
737 int uc_count;
677 void *buf; 738 void *buf;
678 int i; 739 int i;
679 740
@@ -700,8 +761,9 @@ static void virtnet_set_rx_mode(struct net_device *dev)
700 dev_warn(&dev->dev, "Failed to %sable allmulti mode.\n", 761 dev_warn(&dev->dev, "Failed to %sable allmulti mode.\n",
701 allmulti ? "en" : "dis"); 762 allmulti ? "en" : "dis");
702 763
764 uc_count = netdev_uc_count(dev);
703 /* MAC filter - use one buffer for both lists */ 765 /* MAC filter - use one buffer for both lists */
704 mac_data = buf = kzalloc(((dev->uc.count + dev->mc_count) * ETH_ALEN) + 766 mac_data = buf = kzalloc(((uc_count + dev->mc_count) * ETH_ALEN) +
705 (2 * sizeof(mac_data->entries)), GFP_ATOMIC); 767 (2 * sizeof(mac_data->entries)), GFP_ATOMIC);
706 if (!buf) { 768 if (!buf) {
707 dev_warn(&dev->dev, "No memory for MAC address buffer\n"); 769 dev_warn(&dev->dev, "No memory for MAC address buffer\n");
@@ -711,16 +773,16 @@ static void virtnet_set_rx_mode(struct net_device *dev)
711 sg_init_table(sg, 2); 773 sg_init_table(sg, 2);
712 774
713 /* Store the unicast list and count in the front of the buffer */ 775 /* Store the unicast list and count in the front of the buffer */
714 mac_data->entries = dev->uc.count; 776 mac_data->entries = uc_count;
715 i = 0; 777 i = 0;
716 list_for_each_entry(ha, &dev->uc.list, list) 778 netdev_for_each_uc_addr(ha, dev)
717 memcpy(&mac_data->macs[i++][0], ha->addr, ETH_ALEN); 779 memcpy(&mac_data->macs[i++][0], ha->addr, ETH_ALEN);
718 780
719 sg_set_buf(&sg[0], mac_data, 781 sg_set_buf(&sg[0], mac_data,
720 sizeof(mac_data->entries) + (dev->uc.count * ETH_ALEN)); 782 sizeof(mac_data->entries) + (uc_count * ETH_ALEN));
721 783
722 /* multicast list and count fill the end */ 784 /* multicast list and count fill the end */
723 mac_data = (void *)&mac_data->macs[dev->uc.count][0]; 785 mac_data = (void *)&mac_data->macs[uc_count][0];
724 786
725 mac_data->entries = dev->mc_count; 787 mac_data->entries = dev->mc_count;
726 addr = dev->mc_list; 788 addr = dev->mc_list;
@@ -915,8 +977,7 @@ static int virtnet_probe(struct virtio_device *vdev)
915 dev->features |= NETIF_F_HW_VLAN_FILTER; 977 dev->features |= NETIF_F_HW_VLAN_FILTER;
916 } 978 }
917 979
918 /* Initialize our empty receive and send queues. */ 980 /* Initialize our empty send queue. */
919 skb_queue_head_init(&vi->recv);
920 skb_queue_head_init(&vi->send); 981 skb_queue_head_init(&vi->send);
921 982
922 err = register_netdev(dev); 983 err = register_netdev(dev);
@@ -951,25 +1012,35 @@ free:
951 return err; 1012 return err;
952} 1013}
953 1014
1015static void free_unused_bufs(struct virtnet_info *vi)
1016{
1017 void *buf;
1018 while (1) {
1019 buf = vi->rvq->vq_ops->detach_unused_buf(vi->rvq);
1020 if (!buf)
1021 break;
1022 if (vi->mergeable_rx_bufs || vi->big_packets)
1023 give_pages(vi, buf);
1024 else
1025 dev_kfree_skb(buf);
1026 --vi->num;
1027 }
1028 BUG_ON(vi->num != 0);
1029}
1030
954static void __devexit virtnet_remove(struct virtio_device *vdev) 1031static void __devexit virtnet_remove(struct virtio_device *vdev)
955{ 1032{
956 struct virtnet_info *vi = vdev->priv; 1033 struct virtnet_info *vi = vdev->priv;
957 struct sk_buff *skb;
958 1034
959 /* Stop all the virtqueues. */ 1035 /* Stop all the virtqueues. */
960 vdev->config->reset(vdev); 1036 vdev->config->reset(vdev);
961 1037
962 /* Free our skbs in send and recv queues, if any. */ 1038 /* Free our skbs in send queue, if any. */
963 while ((skb = __skb_dequeue(&vi->recv)) != NULL) {
964 kfree_skb(skb);
965 vi->num--;
966 }
967 __skb_queue_purge(&vi->send); 1039 __skb_queue_purge(&vi->send);
968 1040
969 BUG_ON(vi->num != 0);
970
971 unregister_netdev(vi->dev); 1041 unregister_netdev(vi->dev);
972 cancel_delayed_work_sync(&vi->refill); 1042 cancel_delayed_work_sync(&vi->refill);
1043 free_unused_bufs(vi);
973 1044
974 vdev->config->del_vqs(vi->vdev); 1045 vdev->config->del_vqs(vi->vdev);
975 1046
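
Note: the virtnet_set_rx_mode hunk above replaces the open-coded walk of dev->uc.list with the netdev_uc_count()/netdev_for_each_uc_addr() helpers. A minimal sketch of that pattern in isolation follows; the copy_uc_addrs() helper is hypothetical (not part of the patch) and assumes the caller already holds whatever addressing lock the driver's ndo_set_rx_mode path provides.

    #include <linux/netdevice.h>
    #include <linux/etherdevice.h>

    /* Hypothetical helper: copy each unicast address of @dev into @buf,
     * ETH_ALEN bytes per entry.  @buf must be sized for
     * netdev_uc_count(dev) entries. */
    static int copy_uc_addrs(struct net_device *dev, u8 *buf)
    {
            struct netdev_hw_addr *ha;
            int i = 0;

            netdev_for_each_uc_addr(ha, dev)
                    memcpy(buf + i++ * ETH_ALEN, ha->addr, ETH_ALEN);

            return i;       /* same count netdev_uc_count(dev) reports */
    }
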
diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c
index 9cc438282d7..b896f938611 100644
--- a/drivers/net/vmxnet3/vmxnet3_drv.c
+++ b/drivers/net/vmxnet3/vmxnet3_drv.c
@@ -35,7 +35,7 @@ char vmxnet3_driver_name[] = "vmxnet3";
35 * PCI Device ID Table 35 * PCI Device ID Table
36 * Last entry must be all 0s 36 * Last entry must be all 0s
37 */ 37 */
38static const struct pci_device_id vmxnet3_pciid_table[] = { 38static DEFINE_PCI_DEVICE_TABLE(vmxnet3_pciid_table) = {
39 {PCI_VDEVICE(VMWARE, PCI_DEVICE_ID_VMWARE_VMXNET3)}, 39 {PCI_VDEVICE(VMWARE, PCI_DEVICE_ID_VMWARE_VMXNET3)},
40 {0} 40 {0}
41}; 41};
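
Note: many hunks in this series make the same mechanical change, turning open-coded struct pci_device_id tables (often tagged __devinitdata) into DEFINE_PCI_DEVICE_TABLE(). In kernels of this vintage the macro is essentially "const struct pci_device_id name[] __devinitconst", i.e. it adds const and moves the table into the devinit-const section; treat that expansion as an approximation, not a quote. A minimal declaration sketch, with a hypothetical example_pci_tbl and vendor/device IDs:

    #include <linux/pci.h>
    #include <linux/module.h>

    static DEFINE_PCI_DEVICE_TABLE(example_pci_tbl) = {
            { PCI_DEVICE(0x1234, 0x5678) },   /* hypothetical vendor/device */
            { 0, }                            /* terminator: all zeros      */
    };
    MODULE_DEVICE_TABLE(pci, example_pci_tbl);
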
diff --git a/drivers/net/vxge/vxge-main.c b/drivers/net/vxge/vxge-main.c
index b9685e82f7b..a6606b8948e 100644
--- a/drivers/net/vxge/vxge-main.c
+++ b/drivers/net/vxge/vxge-main.c
@@ -54,7 +54,7 @@ MODULE_LICENSE("Dual BSD/GPL");
54MODULE_DESCRIPTION("Neterion's X3100 Series 10GbE PCIe I/O" 54MODULE_DESCRIPTION("Neterion's X3100 Series 10GbE PCIe I/O"
55 "Virtualized Server Adapter"); 55 "Virtualized Server Adapter");
56 56
57static struct pci_device_id vxge_id_table[] __devinitdata = { 57static DEFINE_PCI_DEVICE_TABLE(vxge_id_table) = {
58 {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_TITAN_WIN, PCI_ANY_ID, 58 {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_TITAN_WIN, PCI_ANY_ID,
59 PCI_ANY_ID}, 59 PCI_ANY_ID},
60 {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_TITAN_UNI, PCI_ANY_ID, 60 {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_TITAN_UNI, PCI_ANY_ID,
@@ -4297,10 +4297,8 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre)
4297 vxge_debug_init(VXGE_TRACE, "%s: Neterion %s Server Adapter", 4297 vxge_debug_init(VXGE_TRACE, "%s: Neterion %s Server Adapter",
4298 vdev->ndev->name, ll_config.device_hw_info.product_desc); 4298 vdev->ndev->name, ll_config.device_hw_info.product_desc);
4299 4299
4300 vxge_debug_init(VXGE_TRACE, 4300 vxge_debug_init(VXGE_TRACE, "%s: MAC ADDR: %pM",
4301 "%s: MAC ADDR: %02X:%02X:%02X:%02X:%02X:%02X", 4301 vdev->ndev->name, macaddr);
4302 vdev->ndev->name, macaddr[0], macaddr[1], macaddr[2],
4303 macaddr[3], macaddr[4], macaddr[5]);
4304 4302
4305 vxge_debug_init(VXGE_TRACE, "%s: Link Width x%d", 4303 vxge_debug_init(VXGE_TRACE, "%s: Link Width x%d",
4306 vdev->ndev->name, vxge_hw_device_link_width_get(hldev)); 4304 vdev->ndev->name, vxge_hw_device_link_width_get(hldev));
diff --git a/drivers/net/wan/dscc4.c b/drivers/net/wan/dscc4.c
index 3f759daf3ca..f88c07c1319 100644
--- a/drivers/net/wan/dscc4.c
+++ b/drivers/net/wan/dscc4.c
@@ -2050,7 +2050,7 @@ static int __init dscc4_setup(char *str)
2050__setup("dscc4.setup=", dscc4_setup); 2050__setup("dscc4.setup=", dscc4_setup);
2051#endif 2051#endif
2052 2052
2053static struct pci_device_id dscc4_pci_tbl[] = { 2053static DEFINE_PCI_DEVICE_TABLE(dscc4_pci_tbl) = {
2054 { PCI_VENDOR_ID_SIEMENS, PCI_DEVICE_ID_SIEMENS_DSCC4, 2054 { PCI_VENDOR_ID_SIEMENS, PCI_DEVICE_ID_SIEMENS_DSCC4,
2055 PCI_ANY_ID, PCI_ANY_ID, }, 2055 PCI_ANY_ID, PCI_ANY_ID, },
2056 { 0,} 2056 { 0,}
diff --git a/drivers/net/wan/farsync.c b/drivers/net/wan/farsync.c
index 9bc2e364915..40d724a8e02 100644
--- a/drivers/net/wan/farsync.c
+++ b/drivers/net/wan/farsync.c
@@ -528,7 +528,7 @@ static int fst_debug_mask = { FST_DEBUG };
528/* 528/*
529 * PCI ID lookup table 529 * PCI ID lookup table
530 */ 530 */
531static struct pci_device_id fst_pci_dev_id[] __devinitdata = { 531static DEFINE_PCI_DEVICE_TABLE(fst_pci_dev_id) = {
532 {PCI_VENDOR_ID_FARSITE, PCI_DEVICE_ID_FARSITE_T2P, PCI_ANY_ID, 532 {PCI_VENDOR_ID_FARSITE, PCI_DEVICE_ID_FARSITE_T2P, PCI_ANY_ID,
533 PCI_ANY_ID, 0, 0, FST_TYPE_T2P}, 533 PCI_ANY_ID, 0, 0, FST_TYPE_T2P},
534 534
diff --git a/drivers/net/wan/lmc/lmc_main.c b/drivers/net/wan/lmc/lmc_main.c
index 4b6f27e7c82..b2785037712 100644
--- a/drivers/net/wan/lmc/lmc_main.c
+++ b/drivers/net/wan/lmc/lmc_main.c
@@ -77,7 +77,7 @@
77 77
78static int LMC_PKT_BUF_SZ = 1542; 78static int LMC_PKT_BUF_SZ = 1542;
79 79
80static struct pci_device_id lmc_pci_tbl[] = { 80static DEFINE_PCI_DEVICE_TABLE(lmc_pci_tbl) = {
81 { PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_TULIP_FAST, 81 { PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_TULIP_FAST,
82 PCI_VENDOR_ID_LMC, PCI_ANY_ID }, 82 PCI_VENDOR_ID_LMC, PCI_ANY_ID },
83 { PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_TULIP_FAST, 83 { PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_TULIP_FAST,
diff --git a/drivers/net/wan/pc300_drv.c b/drivers/net/wan/pc300_drv.c
index aec4d395542..f4f1c00d0d2 100644
--- a/drivers/net/wan/pc300_drv.c
+++ b/drivers/net/wan/pc300_drv.c
@@ -251,7 +251,7 @@ static char rcsid[] =
251#undef PC300_DEBUG_RX 251#undef PC300_DEBUG_RX
252#undef PC300_DEBUG_OTHER 252#undef PC300_DEBUG_OTHER
253 253
254static struct pci_device_id cpc_pci_dev_id[] __devinitdata = { 254static DEFINE_PCI_DEVICE_TABLE(cpc_pci_dev_id) = {
255 /* PC300/RSV or PC300/X21, 2 chan */ 255 /* PC300/RSV or PC300/X21, 2 chan */
256 {0x120e, 0x300, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0x300}, 256 {0x120e, 0x300, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0x300},
257 /* PC300/RSV or PC300/X21, 1 chan */ 257 /* PC300/RSV or PC300/X21, 1 chan */
diff --git a/drivers/net/wan/pc300too.c b/drivers/net/wan/pc300too.c
index 60ece54bdd9..c7ab3becd26 100644
--- a/drivers/net/wan/pc300too.c
+++ b/drivers/net/wan/pc300too.c
@@ -481,7 +481,7 @@ static int __devinit pc300_pci_init_one(struct pci_dev *pdev,
481 481
482 482
483 483
484static struct pci_device_id pc300_pci_tbl[] __devinitdata = { 484static DEFINE_PCI_DEVICE_TABLE(pc300_pci_tbl) = {
485 { PCI_VENDOR_ID_CYCLADES, PCI_DEVICE_ID_PC300_RX_1, PCI_ANY_ID, 485 { PCI_VENDOR_ID_CYCLADES, PCI_DEVICE_ID_PC300_RX_1, PCI_ANY_ID,
486 PCI_ANY_ID, 0, 0, 0 }, 486 PCI_ANY_ID, 0, 0, 0 },
487 { PCI_VENDOR_ID_CYCLADES, PCI_DEVICE_ID_PC300_RX_2, PCI_ANY_ID, 487 { PCI_VENDOR_ID_CYCLADES, PCI_DEVICE_ID_PC300_RX_2, PCI_ANY_ID,
diff --git a/drivers/net/wan/pci200syn.c b/drivers/net/wan/pci200syn.c
index f1340faaf02..e2cff64a446 100644
--- a/drivers/net/wan/pci200syn.c
+++ b/drivers/net/wan/pci200syn.c
@@ -417,7 +417,7 @@ static int __devinit pci200_pci_init_one(struct pci_dev *pdev,
417 417
418 418
419 419
420static struct pci_device_id pci200_pci_tbl[] __devinitdata = { 420static DEFINE_PCI_DEVICE_TABLE(pci200_pci_tbl) = {
421 { PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_9050, PCI_VENDOR_ID_PLX, 421 { PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_9050, PCI_VENDOR_ID_PLX,
422 PCI_DEVICE_ID_PLX_PCI200SYN, 0, 0, 0 }, 422 PCI_DEVICE_ID_PLX_PCI200SYN, 0, 0, 0 },
423 { 0, } 423 { 0, }
diff --git a/drivers/net/wan/wanxl.c b/drivers/net/wan/wanxl.c
index daee8a0624e..541c700dcee 100644
--- a/drivers/net/wan/wanxl.c
+++ b/drivers/net/wan/wanxl.c
@@ -814,7 +814,7 @@ static int __devinit wanxl_pci_init_one(struct pci_dev *pdev,
814 return 0; 814 return 0;
815} 815}
816 816
817static struct pci_device_id wanxl_pci_tbl[] __devinitdata = { 817static DEFINE_PCI_DEVICE_TABLE(wanxl_pci_tbl) = {
818 { PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_SBE_WANXL100, PCI_ANY_ID, 818 { PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_SBE_WANXL100, PCI_ANY_ID,
819 PCI_ANY_ID, 0, 0, 0 }, 819 PCI_ANY_ID, 0, 0, 0 },
820 { PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_SBE_WANXL200, PCI_ANY_ID, 820 { PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_SBE_WANXL200, PCI_ANY_ID,
diff --git a/drivers/net/wimax/i2400m/driver.c b/drivers/net/wimax/i2400m/driver.c
index 96a615fe09d..6cead321bc1 100644
--- a/drivers/net/wimax/i2400m/driver.c
+++ b/drivers/net/wimax/i2400m/driver.c
@@ -301,24 +301,15 @@ int i2400m_check_mac_addr(struct i2400m *i2400m)
301 /* Extract MAC addresss */ 301 /* Extract MAC addresss */
302 ddi = (void *) skb->data; 302 ddi = (void *) skb->data;
303 BUILD_BUG_ON(ETH_ALEN != sizeof(ddi->mac_address)); 303 BUILD_BUG_ON(ETH_ALEN != sizeof(ddi->mac_address));
304 d_printf(2, dev, "GET DEVICE INFO: mac addr " 304 d_printf(2, dev, "GET DEVICE INFO: mac addr %pM\n",
305 "%02x:%02x:%02x:%02x:%02x:%02x\n", 305 ddi->mac_address);
306 ddi->mac_address[0], ddi->mac_address[1],
307 ddi->mac_address[2], ddi->mac_address[3],
308 ddi->mac_address[4], ddi->mac_address[5]);
309 if (!memcmp(net_dev->perm_addr, ddi->mac_address, 306 if (!memcmp(net_dev->perm_addr, ddi->mac_address,
310 sizeof(ddi->mac_address))) 307 sizeof(ddi->mac_address)))
311 goto ok; 308 goto ok;
312 dev_warn(dev, "warning: device reports a different MAC address " 309 dev_warn(dev, "warning: device reports a different MAC address "
313 "to that of boot mode's\n"); 310 "to that of boot mode's\n");
314 dev_warn(dev, "device reports %02x:%02x:%02x:%02x:%02x:%02x\n", 311 dev_warn(dev, "device reports %pM\n", ddi->mac_address);
315 ddi->mac_address[0], ddi->mac_address[1], 312 dev_warn(dev, "boot mode reported %pM\n", net_dev->perm_addr);
316 ddi->mac_address[2], ddi->mac_address[3],
317 ddi->mac_address[4], ddi->mac_address[5]);
318 dev_warn(dev, "boot mode reported %02x:%02x:%02x:%02x:%02x:%02x\n",
319 net_dev->perm_addr[0], net_dev->perm_addr[1],
320 net_dev->perm_addr[2], net_dev->perm_addr[3],
321 net_dev->perm_addr[4], net_dev->perm_addr[5]);
322 if (!memcmp(zeromac, ddi->mac_address, sizeof(zeromac))) 313 if (!memcmp(zeromac, ddi->mac_address, sizeof(zeromac)))
323 dev_err(dev, "device reports an invalid MAC address, " 314 dev_err(dev, "device reports an invalid MAC address, "
324 "not updating\n"); 315 "not updating\n");
diff --git a/drivers/net/wimax/i2400m/fw.c b/drivers/net/wimax/i2400m/fw.c
index 64cdfeb299c..e803a7dc650 100644
--- a/drivers/net/wimax/i2400m/fw.c
+++ b/drivers/net/wimax/i2400m/fw.c
@@ -1041,21 +1041,14 @@ int i2400m_read_mac_addr(struct i2400m *i2400m)
1041 dev_err(dev, "BM: read mac addr failed: %d\n", result); 1041 dev_err(dev, "BM: read mac addr failed: %d\n", result);
1042 goto error_read_mac; 1042 goto error_read_mac;
1043 } 1043 }
1044 d_printf(2, dev, 1044 d_printf(2, dev, "mac addr is %pM\n", ack_buf.ack_pl);
1045 "mac addr is %02x:%02x:%02x:%02x:%02x:%02x\n",
1046 ack_buf.ack_pl[0], ack_buf.ack_pl[1],
1047 ack_buf.ack_pl[2], ack_buf.ack_pl[3],
1048 ack_buf.ack_pl[4], ack_buf.ack_pl[5]);
1049 if (i2400m->bus_bm_mac_addr_impaired == 1) { 1045 if (i2400m->bus_bm_mac_addr_impaired == 1) {
1050 ack_buf.ack_pl[0] = 0x00; 1046 ack_buf.ack_pl[0] = 0x00;
1051 ack_buf.ack_pl[1] = 0x16; 1047 ack_buf.ack_pl[1] = 0x16;
1052 ack_buf.ack_pl[2] = 0xd3; 1048 ack_buf.ack_pl[2] = 0xd3;
1053 get_random_bytes(&ack_buf.ack_pl[3], 3); 1049 get_random_bytes(&ack_buf.ack_pl[3], 3);
1054 dev_err(dev, "BM is MAC addr impaired, faking MAC addr to " 1050 dev_err(dev, "BM is MAC addr impaired, faking MAC addr to "
1055 "mac addr is %02x:%02x:%02x:%02x:%02x:%02x\n", 1051 "mac addr is %pM\n", ack_buf.ack_pl);
1056 ack_buf.ack_pl[0], ack_buf.ack_pl[1],
1057 ack_buf.ack_pl[2], ack_buf.ack_pl[3],
1058 ack_buf.ack_pl[4], ack_buf.ack_pl[5]);
1059 result = 0; 1052 result = 0;
1060 } 1053 }
1061 net_dev->addr_len = ETH_ALEN; 1054 net_dev->addr_len = ETH_ALEN;
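
Note: the vxge and i2400m hunks above all collapse six "%02x" conversions into the kernel's %pM printk extension, which prints the six bytes behind the pointer as a colon-separated MAC address. A before/after sketch, using a hypothetical addr[] array (any printk-family function behaves the same way):

    #include <linux/kernel.h>
    #include <linux/if_ether.h>

    static void example_print_mac(void)
    {
            u8 addr[ETH_ALEN] = { 0x00, 0x16, 0xd3, 0x12, 0x34, 0x56 };

            /* old style: six arguments, easy to mismatch with the format */
            printk(KERN_DEBUG "mac %02x:%02x:%02x:%02x:%02x:%02x\n",
                   addr[0], addr[1], addr[2], addr[3], addr[4], addr[5]);

            /* %pM prints the bytes behind the pointer: 00:16:d3:12:34:56 */
            printk(KERN_DEBUG "mac %pM\n", addr);
    }
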
diff --git a/drivers/net/wireless/adm8211.c b/drivers/net/wireless/adm8211.c
index 39410016b4f..e6ca3eb4c0d 100644
--- a/drivers/net/wireless/adm8211.c
+++ b/drivers/net/wireless/adm8211.c
@@ -39,7 +39,7 @@ static unsigned int rx_ring_size __read_mostly = 16;
39module_param(tx_ring_size, uint, 0); 39module_param(tx_ring_size, uint, 0);
40module_param(rx_ring_size, uint, 0); 40module_param(rx_ring_size, uint, 0);
41 41
42static struct pci_device_id adm8211_pci_id_table[] __devinitdata = { 42static DEFINE_PCI_DEVICE_TABLE(adm8211_pci_id_table) = {
43 /* ADMtek ADM8211 */ 43 /* ADMtek ADM8211 */
44 { PCI_DEVICE(0x10B7, 0x6000) }, /* 3Com 3CRSHPW796 */ 44 { PCI_DEVICE(0x10B7, 0x6000) }, /* 3Com 3CRSHPW796 */
45 { PCI_DEVICE(0x1200, 0x8201) }, /* ? */ 45 { PCI_DEVICE(0x1200, 0x8201) }, /* ? */
@@ -1400,15 +1400,15 @@ static void adm8211_configure_filter(struct ieee80211_hw *dev,
1400} 1400}
1401 1401
1402static int adm8211_add_interface(struct ieee80211_hw *dev, 1402static int adm8211_add_interface(struct ieee80211_hw *dev,
1403 struct ieee80211_if_init_conf *conf) 1403 struct ieee80211_vif *vif)
1404{ 1404{
1405 struct adm8211_priv *priv = dev->priv; 1405 struct adm8211_priv *priv = dev->priv;
1406 if (priv->mode != NL80211_IFTYPE_MONITOR) 1406 if (priv->mode != NL80211_IFTYPE_MONITOR)
1407 return -EOPNOTSUPP; 1407 return -EOPNOTSUPP;
1408 1408
1409 switch (conf->type) { 1409 switch (vif->type) {
1410 case NL80211_IFTYPE_STATION: 1410 case NL80211_IFTYPE_STATION:
1411 priv->mode = conf->type; 1411 priv->mode = vif->type;
1412 break; 1412 break;
1413 default: 1413 default:
1414 return -EOPNOTSUPP; 1414 return -EOPNOTSUPP;
@@ -1416,8 +1416,8 @@ static int adm8211_add_interface(struct ieee80211_hw *dev,
1416 1416
1417 ADM8211_IDLE(); 1417 ADM8211_IDLE();
1418 1418
1419 ADM8211_CSR_WRITE(PAR0, le32_to_cpu(*(__le32 *)conf->mac_addr)); 1419 ADM8211_CSR_WRITE(PAR0, le32_to_cpu(*(__le32 *)vif->addr));
1420 ADM8211_CSR_WRITE(PAR1, le16_to_cpu(*(__le16 *)(conf->mac_addr + 4))); 1420 ADM8211_CSR_WRITE(PAR1, le16_to_cpu(*(__le16 *)(vif->addr + 4)));
1421 1421
1422 adm8211_update_mode(dev); 1422 adm8211_update_mode(dev);
1423 1423
@@ -1427,7 +1427,7 @@ static int adm8211_add_interface(struct ieee80211_hw *dev,
1427} 1427}
1428 1428
1429static void adm8211_remove_interface(struct ieee80211_hw *dev, 1429static void adm8211_remove_interface(struct ieee80211_hw *dev,
1430 struct ieee80211_if_init_conf *conf) 1430 struct ieee80211_vif *vif)
1431{ 1431{
1432 struct adm8211_priv *priv = dev->priv; 1432 struct adm8211_priv *priv = dev->priv;
1433 priv->mode = NL80211_IFTYPE_MONITOR; 1433 priv->mode = NL80211_IFTYPE_MONITOR;
diff --git a/drivers/net/wireless/airo.c b/drivers/net/wireless/airo.c
index 4331d675fcc..37e4ab737f2 100644
--- a/drivers/net/wireless/airo.c
+++ b/drivers/net/wireless/airo.c
@@ -57,7 +57,7 @@
57#define DRV_NAME "airo" 57#define DRV_NAME "airo"
58 58
59#ifdef CONFIG_PCI 59#ifdef CONFIG_PCI
60static struct pci_device_id card_ids[] = { 60static DEFINE_PCI_DEVICE_TABLE(card_ids) = {
61 { 0x14b9, 1, PCI_ANY_ID, PCI_ANY_ID, }, 61 { 0x14b9, 1, PCI_ANY_ID, PCI_ANY_ID, },
62 { 0x14b9, 0x4500, PCI_ANY_ID, PCI_ANY_ID }, 62 { 0x14b9, 0x4500, PCI_ANY_ID, PCI_ANY_ID },
63 { 0x14b9, 0x4800, PCI_ANY_ID, PCI_ANY_ID, }, 63 { 0x14b9, 0x4800, PCI_ANY_ID, PCI_ANY_ID, },
diff --git a/drivers/net/wireless/at76c50x-usb.c b/drivers/net/wireless/at76c50x-usb.c
index 2517364d3eb..0fb419936df 100644
--- a/drivers/net/wireless/at76c50x-usb.c
+++ b/drivers/net/wireless/at76c50x-usb.c
@@ -1789,7 +1789,7 @@ static void at76_mac80211_stop(struct ieee80211_hw *hw)
1789} 1789}
1790 1790
1791static int at76_add_interface(struct ieee80211_hw *hw, 1791static int at76_add_interface(struct ieee80211_hw *hw,
1792 struct ieee80211_if_init_conf *conf) 1792 struct ieee80211_vif *vif)
1793{ 1793{
1794 struct at76_priv *priv = hw->priv; 1794 struct at76_priv *priv = hw->priv;
1795 int ret = 0; 1795 int ret = 0;
@@ -1798,7 +1798,7 @@ static int at76_add_interface(struct ieee80211_hw *hw,
1798 1798
1799 mutex_lock(&priv->mtx); 1799 mutex_lock(&priv->mtx);
1800 1800
1801 switch (conf->type) { 1801 switch (vif->type) {
1802 case NL80211_IFTYPE_STATION: 1802 case NL80211_IFTYPE_STATION:
1803 priv->iw_mode = IW_MODE_INFRA; 1803 priv->iw_mode = IW_MODE_INFRA;
1804 break; 1804 break;
@@ -1814,7 +1814,7 @@ exit:
1814} 1814}
1815 1815
1816static void at76_remove_interface(struct ieee80211_hw *hw, 1816static void at76_remove_interface(struct ieee80211_hw *hw,
1817 struct ieee80211_if_init_conf *conf) 1817 struct ieee80211_vif *vif)
1818{ 1818{
1819 at76_dbg(DBG_MAC80211, "%s()", __func__); 1819 at76_dbg(DBG_MAC80211, "%s()", __func__);
1820} 1820}
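
Note: adm8211 and at76c50x above (and ar9170 and ath5k further down) are converted from the old struct ieee80211_if_init_conf argument to being handed the struct ieee80211_vif directly, so the callbacks read vif->type and vif->addr instead of conf->type and conf->mac_addr. A minimal sketch of an add_interface callback in the new form; example_priv and its fields are hypothetical, not taken from any driver in this series:

    #include <net/mac80211.h>

    struct example_priv {
            struct ieee80211_vif *vif;
            u8 mac_addr[ETH_ALEN];
    };

    static int example_add_interface(struct ieee80211_hw *hw,
                                     struct ieee80211_vif *vif)
    {
            struct example_priv *priv = hw->priv;

            if (vif->type != NL80211_IFTYPE_STATION)
                    return -EOPNOTSUPP;      /* only managed mode here */

            priv->vif = vif;
            memcpy(priv->mac_addr, vif->addr, ETH_ALEN);
            return 0;
    }
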
diff --git a/drivers/net/wireless/ath/ar9170/ar9170.h b/drivers/net/wireless/ath/ar9170/ar9170.h
index 9f9459860d8..b99a8c2053d 100644
--- a/drivers/net/wireless/ath/ar9170/ar9170.h
+++ b/drivers/net/wireless/ath/ar9170/ar9170.h
@@ -109,7 +109,6 @@ struct ar9170_rxstream_mpdu_merge {
109 bool has_plcp; 109 bool has_plcp;
110}; 110};
111 111
112#define AR9170_NUM_MAX_BA_RETRY 5
113#define AR9170_NUM_TID 16 112#define AR9170_NUM_TID 16
114#define WME_BA_BMP_SIZE 64 113#define WME_BA_BMP_SIZE 64
115#define AR9170_NUM_MAX_AGG_LEN (2 * WME_BA_BMP_SIZE) 114#define AR9170_NUM_MAX_AGG_LEN (2 * WME_BA_BMP_SIZE)
@@ -143,7 +142,6 @@ struct ar9170_sta_tid {
143 u16 tid; 142 u16 tid;
144 enum ar9170_tid_state state; 143 enum ar9170_tid_state state;
145 bool active; 144 bool active;
146 u8 retry;
147}; 145};
148 146
149#define AR9170_QUEUE_TIMEOUT 64 147#define AR9170_QUEUE_TIMEOUT 64
@@ -154,6 +152,8 @@ struct ar9170_sta_tid {
154 152
155#define AR9170_NUM_TX_STATUS 128 153#define AR9170_NUM_TX_STATUS 128
156#define AR9170_NUM_TX_AGG_MAX 30 154#define AR9170_NUM_TX_AGG_MAX 30
155#define AR9170_NUM_TX_LIMIT_HARD AR9170_TXQ_DEPTH
156#define AR9170_NUM_TX_LIMIT_SOFT (AR9170_TXQ_DEPTH - 10)
157 157
158struct ar9170 { 158struct ar9170 {
159 struct ieee80211_hw *hw; 159 struct ieee80211_hw *hw;
@@ -248,13 +248,8 @@ struct ar9170_sta_info {
248 unsigned int ampdu_max_len; 248 unsigned int ampdu_max_len;
249}; 249};
250 250
251#define AR9170_TX_FLAG_WAIT_FOR_ACK BIT(0)
252#define AR9170_TX_FLAG_NO_ACK BIT(1)
253#define AR9170_TX_FLAG_BLOCK_ACK BIT(2)
254
255struct ar9170_tx_info { 251struct ar9170_tx_info {
256 unsigned long timeout; 252 unsigned long timeout;
257 unsigned int flags;
258}; 253};
259 254
260#define IS_STARTED(a) (((struct ar9170 *)a)->state >= AR9170_STARTED) 255#define IS_STARTED(a) (((struct ar9170 *)a)->state >= AR9170_STARTED)
diff --git a/drivers/net/wireless/ath/ar9170/hw.h b/drivers/net/wireless/ath/ar9170/hw.h
index 701ddb7d840..0a1d4c28e68 100644
--- a/drivers/net/wireless/ath/ar9170/hw.h
+++ b/drivers/net/wireless/ath/ar9170/hw.h
@@ -276,6 +276,7 @@ struct ar9170_tx_control {
276#define AR9170_TX_MAC_RATE_PROBE 0x8000 276#define AR9170_TX_MAC_RATE_PROBE 0x8000
277 277
278/* either-or */ 278/* either-or */
279#define AR9170_TX_PHY_MOD_MASK 0x00000003
279#define AR9170_TX_PHY_MOD_CCK 0x00000000 280#define AR9170_TX_PHY_MOD_CCK 0x00000000
280#define AR9170_TX_PHY_MOD_OFDM 0x00000001 281#define AR9170_TX_PHY_MOD_OFDM 0x00000001
281#define AR9170_TX_PHY_MOD_HT 0x00000002 282#define AR9170_TX_PHY_MOD_HT 0x00000002
diff --git a/drivers/net/wireless/ath/ar9170/mac.c b/drivers/net/wireless/ath/ar9170/mac.c
index ddc8c09dc79..857e8610429 100644
--- a/drivers/net/wireless/ath/ar9170/mac.c
+++ b/drivers/net/wireless/ath/ar9170/mac.c
@@ -117,7 +117,7 @@ int ar9170_set_qos(struct ar9170 *ar)
117 ar9170_regwrite(AR9170_MAC_REG_AC1_AC0_TXOP, 117 ar9170_regwrite(AR9170_MAC_REG_AC1_AC0_TXOP,
118 ar->edcf[0].txop | ar->edcf[1].txop << 16); 118 ar->edcf[0].txop | ar->edcf[1].txop << 16);
119 ar9170_regwrite(AR9170_MAC_REG_AC3_AC2_TXOP, 119 ar9170_regwrite(AR9170_MAC_REG_AC3_AC2_TXOP,
120 ar->edcf[1].txop | ar->edcf[3].txop << 16); 120 ar->edcf[2].txop | ar->edcf[3].txop << 16);
121 121
122 ar9170_regwrite_finish(); 122 ar9170_regwrite_finish();
123 123
diff --git a/drivers/net/wireless/ath/ar9170/main.c b/drivers/net/wireless/ath/ar9170/main.c
index f9d6db8d013..4d27f7f67c7 100644
--- a/drivers/net/wireless/ath/ar9170/main.c
+++ b/drivers/net/wireless/ath/ar9170/main.c
@@ -194,12 +194,15 @@ static inline u16 ar9170_get_seq(struct sk_buff *skb)
194 return ar9170_get_seq_h((void *) txc->frame_data); 194 return ar9170_get_seq_h((void *) txc->frame_data);
195} 195}
196 196
197static inline u16 ar9170_get_tid_h(struct ieee80211_hdr *hdr)
198{
199 return (ieee80211_get_qos_ctl(hdr))[0] & IEEE80211_QOS_CTL_TID_MASK;
200}
201
197static inline u16 ar9170_get_tid(struct sk_buff *skb) 202static inline u16 ar9170_get_tid(struct sk_buff *skb)
198{ 203{
199 struct ar9170_tx_control *txc = (void *) skb->data; 204 struct ar9170_tx_control *txc = (void *) skb->data;
200 struct ieee80211_hdr *hdr = (void *) txc->frame_data; 205 return ar9170_get_tid_h((struct ieee80211_hdr *) txc->frame_data);
201
202 return (ieee80211_get_qos_ctl(hdr))[0] & IEEE80211_QOS_CTL_TID_MASK;
203} 206}
204 207
205#define GET_NEXT_SEQ(seq) ((seq + 1) & 0x0fff) 208#define GET_NEXT_SEQ(seq) ((seq + 1) & 0x0fff)
@@ -213,10 +216,10 @@ static void ar9170_print_txheader(struct ar9170 *ar, struct sk_buff *skb)
213 struct ar9170_tx_info *arinfo = (void *) txinfo->rate_driver_data; 216 struct ar9170_tx_info *arinfo = (void *) txinfo->rate_driver_data;
214 struct ieee80211_hdr *hdr = (void *) txc->frame_data; 217 struct ieee80211_hdr *hdr = (void *) txc->frame_data;
215 218
216 printk(KERN_DEBUG "%s: => FRAME [skb:%p, q:%d, DA:[%pM] flags:%x s:%d " 219 printk(KERN_DEBUG "%s: => FRAME [skb:%p, q:%d, DA:[%pM] s:%d "
217 "mac_ctrl:%04x, phy_ctrl:%08x, timeout:[%d ms]]\n", 220 "mac_ctrl:%04x, phy_ctrl:%08x, timeout:[%d ms]]\n",
218 wiphy_name(ar->hw->wiphy), skb, skb_get_queue_mapping(skb), 221 wiphy_name(ar->hw->wiphy), skb, skb_get_queue_mapping(skb),
219 ieee80211_get_DA(hdr), arinfo->flags, ar9170_get_seq_h(hdr), 222 ieee80211_get_DA(hdr), ar9170_get_seq_h(hdr),
220 le16_to_cpu(txc->mac_control), le32_to_cpu(txc->phy_control), 223 le16_to_cpu(txc->mac_control), le32_to_cpu(txc->phy_control),
221 jiffies_to_msecs(arinfo->timeout - jiffies)); 224 jiffies_to_msecs(arinfo->timeout - jiffies));
222} 225}
@@ -430,7 +433,7 @@ void ar9170_tx_callback(struct ar9170 *ar, struct sk_buff *skb)
430 spin_lock_irqsave(&ar->tx_stats_lock, flags); 433 spin_lock_irqsave(&ar->tx_stats_lock, flags);
431 ar->tx_stats[queue].len--; 434 ar->tx_stats[queue].len--;
432 435
433 if (skb_queue_empty(&ar->tx_pending[queue])) { 436 if (ar->tx_stats[queue].len < AR9170_NUM_TX_LIMIT_SOFT) {
434#ifdef AR9170_QUEUE_STOP_DEBUG 437#ifdef AR9170_QUEUE_STOP_DEBUG
435 printk(KERN_DEBUG "%s: wake queue %d\n", 438 printk(KERN_DEBUG "%s: wake queue %d\n",
436 wiphy_name(ar->hw->wiphy), queue); 439 wiphy_name(ar->hw->wiphy), queue);
@@ -440,22 +443,17 @@ void ar9170_tx_callback(struct ar9170 *ar, struct sk_buff *skb)
440 } 443 }
441 spin_unlock_irqrestore(&ar->tx_stats_lock, flags); 444 spin_unlock_irqrestore(&ar->tx_stats_lock, flags);
442 445
443 if (arinfo->flags & AR9170_TX_FLAG_BLOCK_ACK) { 446 if (info->flags & IEEE80211_TX_CTL_NO_ACK) {
444 ar9170_tx_ampdu_callback(ar, skb);
445 } else if (arinfo->flags & AR9170_TX_FLAG_WAIT_FOR_ACK) {
446 arinfo->timeout = jiffies +
447 msecs_to_jiffies(AR9170_TX_TIMEOUT);
448
449 skb_queue_tail(&ar->tx_status[queue], skb);
450 } else if (arinfo->flags & AR9170_TX_FLAG_NO_ACK) {
451 ar9170_tx_status(ar, skb, AR9170_TX_STATUS_FAILED); 447 ar9170_tx_status(ar, skb, AR9170_TX_STATUS_FAILED);
452 } else { 448 } else {
453#ifdef AR9170_QUEUE_DEBUG 449 if (info->flags & IEEE80211_TX_CTL_AMPDU) {
454 printk(KERN_DEBUG "%s: unsupported frame flags!\n", 450 ar9170_tx_ampdu_callback(ar, skb);
455 wiphy_name(ar->hw->wiphy)); 451 } else {
456 ar9170_print_txheader(ar, skb); 452 arinfo->timeout = jiffies +
457#endif /* AR9170_QUEUE_DEBUG */ 453 msecs_to_jiffies(AR9170_TX_TIMEOUT);
458 dev_kfree_skb_any(skb); 454
455 skb_queue_tail(&ar->tx_status[queue], skb);
456 }
459 } 457 }
460 458
461 if (!ar->tx_stats[queue].len && 459 if (!ar->tx_stats[queue].len &&
@@ -1407,17 +1405,6 @@ static int ar9170_tx_prepare(struct ar9170 *ar, struct sk_buff *skb)
1407 1405
1408 if (!(info->flags & IEEE80211_TX_CTL_NO_ACK) && 1406 if (!(info->flags & IEEE80211_TX_CTL_NO_ACK) &&
1409 (is_valid_ether_addr(ieee80211_get_DA(hdr)))) { 1407 (is_valid_ether_addr(ieee80211_get_DA(hdr)))) {
1410 if (info->flags & IEEE80211_TX_CTL_AMPDU) {
1411 if (unlikely(!info->control.sta))
1412 goto err_out;
1413
1414 txc->mac_control |= cpu_to_le16(AR9170_TX_MAC_AGGR);
1415 arinfo->flags = AR9170_TX_FLAG_BLOCK_ACK;
1416
1417 goto out;
1418 }
1419
1420 txc->mac_control |= cpu_to_le16(AR9170_TX_MAC_RATE_PROBE);
1421 /* 1408 /*
1422 * WARNING: 1409 * WARNING:
1423 * Putting the QoS queue bits into an unexplored territory is 1410 * Putting the QoS queue bits into an unexplored territory is
@@ -1431,12 +1418,17 @@ static int ar9170_tx_prepare(struct ar9170 *ar, struct sk_buff *skb)
1431 1418
1432 txc->phy_control |= 1419 txc->phy_control |=
1433 cpu_to_le32(queue << AR9170_TX_PHY_QOS_SHIFT); 1420 cpu_to_le32(queue << AR9170_TX_PHY_QOS_SHIFT);
1434 arinfo->flags = AR9170_TX_FLAG_WAIT_FOR_ACK; 1421
1435 } else { 1422 if (info->flags & IEEE80211_TX_CTL_AMPDU) {
1436 arinfo->flags = AR9170_TX_FLAG_NO_ACK; 1423 if (unlikely(!info->control.sta))
1424 goto err_out;
1425
1426 txc->mac_control |= cpu_to_le16(AR9170_TX_MAC_AGGR);
1427 } else {
1428 txc->mac_control |= cpu_to_le16(AR9170_TX_MAC_RATE_PROBE);
1429 }
1437 } 1430 }
1438 1431
1439out:
1440 return 0; 1432 return 0;
1441 1433
1442err_out: 1434err_out:
@@ -1671,8 +1663,7 @@ static bool ar9170_tx_ampdu(struct ar9170 *ar)
1671 * tell the FW/HW that this is the last frame, 1663 * tell the FW/HW that this is the last frame,
1672 * that way it will wait for the immediate block ack. 1664 * that way it will wait for the immediate block ack.
1673 */ 1665 */
1674 if (likely(skb_peek_tail(&agg))) 1666 ar9170_tx_indicate_immba(ar, skb_peek_tail(&agg));
1675 ar9170_tx_indicate_immba(ar, skb_peek_tail(&agg));
1676 1667
1677#ifdef AR9170_TXAGG_DEBUG 1668#ifdef AR9170_TXAGG_DEBUG
1678 printk(KERN_DEBUG "%s: generated A-MPDU looks like this:\n", 1669 printk(KERN_DEBUG "%s: generated A-MPDU looks like this:\n",
@@ -1716,6 +1707,21 @@ static void ar9170_tx(struct ar9170 *ar)
1716 1707
1717 for (i = 0; i < __AR9170_NUM_TXQ; i++) { 1708 for (i = 0; i < __AR9170_NUM_TXQ; i++) {
1718 spin_lock_irqsave(&ar->tx_stats_lock, flags); 1709 spin_lock_irqsave(&ar->tx_stats_lock, flags);
1710 frames = min(ar->tx_stats[i].limit - ar->tx_stats[i].len,
1711 skb_queue_len(&ar->tx_pending[i]));
1712
1713 if (remaining_space < frames) {
1714#ifdef AR9170_QUEUE_DEBUG
1715 printk(KERN_DEBUG "%s: tx quota reached queue:%d, "
1716 "remaining slots:%d, needed:%d\n",
1717 wiphy_name(ar->hw->wiphy), i, remaining_space,
1718 frames);
1719#endif /* AR9170_QUEUE_DEBUG */
1720 frames = remaining_space;
1721 }
1722
1723 ar->tx_stats[i].len += frames;
1724 ar->tx_stats[i].count += frames;
1719 if (ar->tx_stats[i].len >= ar->tx_stats[i].limit) { 1725 if (ar->tx_stats[i].len >= ar->tx_stats[i].limit) {
1720#ifdef AR9170_QUEUE_DEBUG 1726#ifdef AR9170_QUEUE_DEBUG
1721 printk(KERN_DEBUG "%s: queue %d full\n", 1727 printk(KERN_DEBUG "%s: queue %d full\n",
@@ -1733,25 +1739,8 @@ static void ar9170_tx(struct ar9170 *ar)
1733 __ar9170_dump_txstats(ar); 1739 __ar9170_dump_txstats(ar);
1734#endif /* AR9170_QUEUE_STOP_DEBUG */ 1740#endif /* AR9170_QUEUE_STOP_DEBUG */
1735 ieee80211_stop_queue(ar->hw, i); 1741 ieee80211_stop_queue(ar->hw, i);
1736 spin_unlock_irqrestore(&ar->tx_stats_lock, flags);
1737 continue;
1738 } 1742 }
1739 1743
1740 frames = min(ar->tx_stats[i].limit - ar->tx_stats[i].len,
1741 skb_queue_len(&ar->tx_pending[i]));
1742
1743 if (remaining_space < frames) {
1744#ifdef AR9170_QUEUE_DEBUG
1745 printk(KERN_DEBUG "%s: tx quota reached queue:%d, "
1746 "remaining slots:%d, needed:%d\n",
1747 wiphy_name(ar->hw->wiphy), i, remaining_space,
1748 frames);
1749#endif /* AR9170_QUEUE_DEBUG */
1750 frames = remaining_space;
1751 }
1752
1753 ar->tx_stats[i].len += frames;
1754 ar->tx_stats[i].count += frames;
1755 spin_unlock_irqrestore(&ar->tx_stats_lock, flags); 1744 spin_unlock_irqrestore(&ar->tx_stats_lock, flags);
1756 1745
1757 if (!frames) 1746 if (!frames)
@@ -1773,7 +1762,7 @@ static void ar9170_tx(struct ar9170 *ar)
1773 arinfo->timeout = jiffies + 1762 arinfo->timeout = jiffies +
1774 msecs_to_jiffies(AR9170_TX_TIMEOUT); 1763 msecs_to_jiffies(AR9170_TX_TIMEOUT);
1775 1764
1776 if (arinfo->flags == AR9170_TX_FLAG_BLOCK_ACK) 1765 if (info->flags & IEEE80211_TX_CTL_AMPDU)
1777 atomic_inc(&ar->tx_ampdu_pending); 1766 atomic_inc(&ar->tx_ampdu_pending);
1778 1767
1779#ifdef AR9170_QUEUE_DEBUG 1768#ifdef AR9170_QUEUE_DEBUG
@@ -1784,7 +1773,7 @@ static void ar9170_tx(struct ar9170 *ar)
1784 1773
1785 err = ar->tx(ar, skb); 1774 err = ar->tx(ar, skb);
1786 if (unlikely(err)) { 1775 if (unlikely(err)) {
1787 if (arinfo->flags == AR9170_TX_FLAG_BLOCK_ACK) 1776 if (info->flags & IEEE80211_TX_CTL_AMPDU)
1788 atomic_dec(&ar->tx_ampdu_pending); 1777 atomic_dec(&ar->tx_ampdu_pending);
1789 1778
1790 frames_failed++; 1779 frames_failed++;
@@ -1950,7 +1939,7 @@ err_free:
1950} 1939}
1951 1940
1952static int ar9170_op_add_interface(struct ieee80211_hw *hw, 1941static int ar9170_op_add_interface(struct ieee80211_hw *hw,
1953 struct ieee80211_if_init_conf *conf) 1942 struct ieee80211_vif *vif)
1954{ 1943{
1955 struct ar9170 *ar = hw->priv; 1944 struct ar9170 *ar = hw->priv;
1956 struct ath_common *common = &ar->common; 1945 struct ath_common *common = &ar->common;
@@ -1963,8 +1952,8 @@ static int ar9170_op_add_interface(struct ieee80211_hw *hw,
1963 goto unlock; 1952 goto unlock;
1964 } 1953 }
1965 1954
1966 ar->vif = conf->vif; 1955 ar->vif = vif;
1967 memcpy(common->macaddr, conf->mac_addr, ETH_ALEN); 1956 memcpy(common->macaddr, vif->addr, ETH_ALEN);
1968 1957
1969 if (modparam_nohwcrypt || (ar->vif->type != NL80211_IFTYPE_STATION)) { 1958 if (modparam_nohwcrypt || (ar->vif->type != NL80211_IFTYPE_STATION)) {
1970 ar->rx_software_decryption = true; 1959 ar->rx_software_decryption = true;
@@ -1984,7 +1973,7 @@ unlock:
1984} 1973}
1985 1974
1986static void ar9170_op_remove_interface(struct ieee80211_hw *hw, 1975static void ar9170_op_remove_interface(struct ieee80211_hw *hw,
1987 struct ieee80211_if_init_conf *conf) 1976 struct ieee80211_vif *vif)
1988{ 1977{
1989 struct ar9170 *ar = hw->priv; 1978 struct ar9170 *ar = hw->priv;
1990 1979
@@ -2366,7 +2355,6 @@ static void ar9170_sta_notify(struct ieee80211_hw *hw,
2366 sta_info->agg[i].state = AR9170_TID_STATE_SHUTDOWN; 2355 sta_info->agg[i].state = AR9170_TID_STATE_SHUTDOWN;
2367 sta_info->agg[i].active = false; 2356 sta_info->agg[i].active = false;
2368 sta_info->agg[i].ssn = 0; 2357 sta_info->agg[i].ssn = 0;
2369 sta_info->agg[i].retry = 0;
2370 sta_info->agg[i].tid = i; 2358 sta_info->agg[i].tid = i;
2371 INIT_LIST_HEAD(&sta_info->agg[i].list); 2359 INIT_LIST_HEAD(&sta_info->agg[i].list);
2372 skb_queue_head_init(&sta_info->agg[i].queue); 2360 skb_queue_head_init(&sta_info->agg[i].queue);
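
Note: the ar9170 hunks above drop the driver-private AR9170_TX_FLAG_* word and decide the TX completion path from mac80211's per-skb control flags instead. The sketch below mirrors the branch structure of the tx-callback hunk only; the function and enum names are hypothetical and it is not the driver's actual code:

    #include <net/mac80211.h>

    enum example_tx_path { TX_PATH_NO_ACK, TX_PATH_AMPDU, TX_PATH_WAIT_ACK };

    /* Classify a completed frame the way the tx-callback hunk above does. */
    static enum example_tx_path example_classify_tx(struct sk_buff *skb)
    {
            struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);

            if (info->flags & IEEE80211_TX_CTL_NO_ACK)
                    return TX_PATH_NO_ACK;   /* no ACK expected from the peer */
            if (info->flags & IEEE80211_TX_CTL_AMPDU)
                    return TX_PATH_AMPDU;    /* aggregated: block-ack handling */
            return TX_PATH_WAIT_ACK;         /* queue until the ACK or timeout */
    }
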
diff --git a/drivers/net/wireless/ath/ar9170/usb.c b/drivers/net/wireless/ath/ar9170/usb.c
index e0799d92405..0f361186b78 100644
--- a/drivers/net/wireless/ath/ar9170/usb.c
+++ b/drivers/net/wireless/ath/ar9170/usb.c
@@ -84,6 +84,8 @@ static struct usb_device_id ar9170_usb_ids[] = {
84 { USB_DEVICE(0x0cde, 0x0023) }, 84 { USB_DEVICE(0x0cde, 0x0023) },
85 /* Z-Com UB82 ABG */ 85 /* Z-Com UB82 ABG */
86 { USB_DEVICE(0x0cde, 0x0026) }, 86 { USB_DEVICE(0x0cde, 0x0026) },
87 /* Sphairon Homelink 1202 */
88 { USB_DEVICE(0x0cde, 0x0027) },
87 /* Arcadyan WN7512 */ 89 /* Arcadyan WN7512 */
88 { USB_DEVICE(0x083a, 0xf522) }, 90 { USB_DEVICE(0x083a, 0xf522) },
89 /* Planex GWUS300 */ 91 /* Planex GWUS300 */
diff --git a/drivers/net/wireless/ath/ath5k/ath5k.h b/drivers/net/wireless/ath/ath5k/ath5k.h
index 6a2a9676111..66bcb506a11 100644
--- a/drivers/net/wireless/ath/ath5k/ath5k.h
+++ b/drivers/net/wireless/ath/ath5k/ath5k.h
@@ -1063,6 +1063,7 @@ struct ath5k_hw {
1063 u32 ah_cw_min; 1063 u32 ah_cw_min;
1064 u32 ah_cw_max; 1064 u32 ah_cw_max;
1065 u32 ah_limit_tx_retries; 1065 u32 ah_limit_tx_retries;
1066 u8 ah_coverage_class;
1066 1067
1067 /* Antenna Control */ 1068 /* Antenna Control */
1068 u32 ah_ant_ctl[AR5K_EEPROM_N_MODES][AR5K_ANT_MAX]; 1069 u32 ah_ant_ctl[AR5K_EEPROM_N_MODES][AR5K_ANT_MAX];
@@ -1200,6 +1201,7 @@ extern bool ath5k_eeprom_is_hb63(struct ath5k_hw *ah);
1200 1201
1201/* Protocol Control Unit Functions */ 1202/* Protocol Control Unit Functions */
1202extern int ath5k_hw_set_opmode(struct ath5k_hw *ah); 1203extern int ath5k_hw_set_opmode(struct ath5k_hw *ah);
1204extern void ath5k_hw_set_coverage_class(struct ath5k_hw *ah, u8 coverage_class);
1203/* BSSID Functions */ 1205/* BSSID Functions */
1204extern int ath5k_hw_set_lladdr(struct ath5k_hw *ah, const u8 *mac); 1206extern int ath5k_hw_set_lladdr(struct ath5k_hw *ah, const u8 *mac);
1205extern void ath5k_hw_set_associd(struct ath5k_hw *ah); 1207extern void ath5k_hw_set_associd(struct ath5k_hw *ah);
@@ -1231,6 +1233,10 @@ extern int ath5k_hw_set_ack_timeout(struct ath5k_hw *ah, unsigned int timeout);
1231extern unsigned int ath5k_hw_get_ack_timeout(struct ath5k_hw *ah); 1233extern unsigned int ath5k_hw_get_ack_timeout(struct ath5k_hw *ah);
1232extern int ath5k_hw_set_cts_timeout(struct ath5k_hw *ah, unsigned int timeout); 1234extern int ath5k_hw_set_cts_timeout(struct ath5k_hw *ah, unsigned int timeout);
1233extern unsigned int ath5k_hw_get_cts_timeout(struct ath5k_hw *ah); 1235extern unsigned int ath5k_hw_get_cts_timeout(struct ath5k_hw *ah);
1236/* Clock rate related functions */
1237unsigned int ath5k_hw_htoclock(struct ath5k_hw *ah, unsigned int usec);
1238unsigned int ath5k_hw_clocktoh(struct ath5k_hw *ah, unsigned int clock);
1239unsigned int ath5k_hw_get_clockrate(struct ath5k_hw *ah);
1234/* Key table (WEP) functions */ 1240/* Key table (WEP) functions */
1235extern int ath5k_hw_reset_key(struct ath5k_hw *ah, u16 entry); 1241extern int ath5k_hw_reset_key(struct ath5k_hw *ah, u16 entry);
1236extern int ath5k_hw_is_key_valid(struct ath5k_hw *ah, u16 entry); 1242extern int ath5k_hw_is_key_valid(struct ath5k_hw *ah, u16 entry);
@@ -1310,24 +1316,6 @@ extern int ath5k_hw_set_txpower_limit(struct ath5k_hw *ah, u8 txpower);
1310 * Functions used internaly 1316 * Functions used internaly
1311 */ 1317 */
1312 1318
1313/*
1314 * Translate usec to hw clock units
1315 * TODO: Half/quarter rate
1316 */
1317static inline unsigned int ath5k_hw_htoclock(unsigned int usec, bool turbo)
1318{
1319 return turbo ? (usec * 80) : (usec * 40);
1320}
1321
1322/*
1323 * Translate hw clock units to usec
1324 * TODO: Half/quarter rate
1325 */
1326static inline unsigned int ath5k_hw_clocktoh(unsigned int clock, bool turbo)
1327{
1328 return turbo ? (clock / 80) : (clock / 40);
1329}
1330
1331static inline struct ath_common *ath5k_hw_common(struct ath5k_hw *ah) 1319static inline struct ath_common *ath5k_hw_common(struct ath5k_hw *ah)
1332{ 1320{
1333 return &ah->common; 1321 return &ah->common;
diff --git a/drivers/net/wireless/ath/ath5k/base.c b/drivers/net/wireless/ath/ath5k/base.c
index e63b7c40d0e..5577bcc80ea 100644
--- a/drivers/net/wireless/ath/ath5k/base.c
+++ b/drivers/net/wireless/ath/ath5k/base.c
@@ -83,7 +83,7 @@ MODULE_VERSION("0.6.0 (EXPERIMENTAL)");
83 83
84 84
85/* Known PCI ids */ 85/* Known PCI ids */
86static const struct pci_device_id ath5k_pci_id_table[] = { 86static DEFINE_PCI_DEVICE_TABLE(ath5k_pci_id_table) = {
87 { PCI_VDEVICE(ATHEROS, 0x0207) }, /* 5210 early */ 87 { PCI_VDEVICE(ATHEROS, 0x0207) }, /* 5210 early */
88 { PCI_VDEVICE(ATHEROS, 0x0007) }, /* 5210 */ 88 { PCI_VDEVICE(ATHEROS, 0x0007) }, /* 5210 */
89 { PCI_VDEVICE(ATHEROS, 0x0011) }, /* 5311 - this is on AHB bus !*/ 89 { PCI_VDEVICE(ATHEROS, 0x0011) }, /* 5311 - this is on AHB bus !*/
@@ -225,9 +225,9 @@ static int ath5k_reset_wake(struct ath5k_softc *sc);
225static int ath5k_start(struct ieee80211_hw *hw); 225static int ath5k_start(struct ieee80211_hw *hw);
226static void ath5k_stop(struct ieee80211_hw *hw); 226static void ath5k_stop(struct ieee80211_hw *hw);
227static int ath5k_add_interface(struct ieee80211_hw *hw, 227static int ath5k_add_interface(struct ieee80211_hw *hw,
228 struct ieee80211_if_init_conf *conf); 228 struct ieee80211_vif *vif);
229static void ath5k_remove_interface(struct ieee80211_hw *hw, 229static void ath5k_remove_interface(struct ieee80211_hw *hw,
230 struct ieee80211_if_init_conf *conf); 230 struct ieee80211_vif *vif);
231static int ath5k_config(struct ieee80211_hw *hw, u32 changed); 231static int ath5k_config(struct ieee80211_hw *hw, u32 changed);
232static u64 ath5k_prepare_multicast(struct ieee80211_hw *hw, 232static u64 ath5k_prepare_multicast(struct ieee80211_hw *hw,
233 int mc_count, struct dev_addr_list *mc_list); 233 int mc_count, struct dev_addr_list *mc_list);
@@ -254,6 +254,8 @@ static void ath5k_bss_info_changed(struct ieee80211_hw *hw,
254 u32 changes); 254 u32 changes);
255static void ath5k_sw_scan_start(struct ieee80211_hw *hw); 255static void ath5k_sw_scan_start(struct ieee80211_hw *hw);
256static void ath5k_sw_scan_complete(struct ieee80211_hw *hw); 256static void ath5k_sw_scan_complete(struct ieee80211_hw *hw);
257static void ath5k_set_coverage_class(struct ieee80211_hw *hw,
258 u8 coverage_class);
257 259
258static const struct ieee80211_ops ath5k_hw_ops = { 260static const struct ieee80211_ops ath5k_hw_ops = {
259 .tx = ath5k_tx, 261 .tx = ath5k_tx,
@@ -274,6 +276,7 @@ static const struct ieee80211_ops ath5k_hw_ops = {
274 .bss_info_changed = ath5k_bss_info_changed, 276 .bss_info_changed = ath5k_bss_info_changed,
275 .sw_scan_start = ath5k_sw_scan_start, 277 .sw_scan_start = ath5k_sw_scan_start,
276 .sw_scan_complete = ath5k_sw_scan_complete, 278 .sw_scan_complete = ath5k_sw_scan_complete,
279 .set_coverage_class = ath5k_set_coverage_class,
277}; 280};
278 281
279/* 282/*
@@ -2773,7 +2776,7 @@ static void ath5k_stop(struct ieee80211_hw *hw)
2773} 2776}
2774 2777
2775static int ath5k_add_interface(struct ieee80211_hw *hw, 2778static int ath5k_add_interface(struct ieee80211_hw *hw,
2776 struct ieee80211_if_init_conf *conf) 2779 struct ieee80211_vif *vif)
2777{ 2780{
2778 struct ath5k_softc *sc = hw->priv; 2781 struct ath5k_softc *sc = hw->priv;
2779 int ret; 2782 int ret;
@@ -2784,22 +2787,22 @@ static int ath5k_add_interface(struct ieee80211_hw *hw,
2784 goto end; 2787 goto end;
2785 } 2788 }
2786 2789
2787 sc->vif = conf->vif; 2790 sc->vif = vif;
2788 2791
2789 switch (conf->type) { 2792 switch (vif->type) {
2790 case NL80211_IFTYPE_AP: 2793 case NL80211_IFTYPE_AP:
2791 case NL80211_IFTYPE_STATION: 2794 case NL80211_IFTYPE_STATION:
2792 case NL80211_IFTYPE_ADHOC: 2795 case NL80211_IFTYPE_ADHOC:
2793 case NL80211_IFTYPE_MESH_POINT: 2796 case NL80211_IFTYPE_MESH_POINT:
2794 case NL80211_IFTYPE_MONITOR: 2797 case NL80211_IFTYPE_MONITOR:
2795 sc->opmode = conf->type; 2798 sc->opmode = vif->type;
2796 break; 2799 break;
2797 default: 2800 default:
2798 ret = -EOPNOTSUPP; 2801 ret = -EOPNOTSUPP;
2799 goto end; 2802 goto end;
2800 } 2803 }
2801 2804
2802 ath5k_hw_set_lladdr(sc->ah, conf->mac_addr); 2805 ath5k_hw_set_lladdr(sc->ah, vif->addr);
2803 ath5k_mode_setup(sc); 2806 ath5k_mode_setup(sc);
2804 2807
2805 ret = 0; 2808 ret = 0;
@@ -2810,13 +2813,13 @@ end:
2810 2813
2811static void 2814static void
2812ath5k_remove_interface(struct ieee80211_hw *hw, 2815ath5k_remove_interface(struct ieee80211_hw *hw,
2813 struct ieee80211_if_init_conf *conf) 2816 struct ieee80211_vif *vif)
2814{ 2817{
2815 struct ath5k_softc *sc = hw->priv; 2818 struct ath5k_softc *sc = hw->priv;
2816 u8 mac[ETH_ALEN] = {}; 2819 u8 mac[ETH_ALEN] = {};
2817 2820
2818 mutex_lock(&sc->lock); 2821 mutex_lock(&sc->lock);
2819 if (sc->vif != conf->vif) 2822 if (sc->vif != vif)
2820 goto end; 2823 goto end;
2821 2824
2822 ath5k_hw_set_lladdr(sc->ah, mac); 2825 ath5k_hw_set_lladdr(sc->ah, mac);
@@ -3262,3 +3265,22 @@ static void ath5k_sw_scan_complete(struct ieee80211_hw *hw)
3262 ath5k_hw_set_ledstate(sc->ah, sc->assoc ? 3265 ath5k_hw_set_ledstate(sc->ah, sc->assoc ?
3263 AR5K_LED_ASSOC : AR5K_LED_INIT); 3266 AR5K_LED_ASSOC : AR5K_LED_INIT);
3264} 3267}
3268
3269/**
3270 * ath5k_set_coverage_class - Set IEEE 802.11 coverage class
3271 *
3272 * @hw: struct ieee80211_hw pointer
3273 * @coverage_class: IEEE 802.11 coverage class number
3274 *
3275 * Mac80211 callback. Sets slot time, ACK timeout and CTS timeout for given
3276 * coverage class. The values are persistent, they are restored after device
3277 * reset.
3278 */
3279static void ath5k_set_coverage_class(struct ieee80211_hw *hw, u8 coverage_class)
3280{
3281 struct ath5k_softc *sc = hw->priv;
3282
3283 mutex_lock(&sc->lock);
3284 ath5k_hw_set_coverage_class(sc->ah, coverage_class);
3285 mutex_unlock(&sc->lock);
3286}
diff --git a/drivers/net/wireless/ath/ath5k/pcu.c b/drivers/net/wireless/ath/ath5k/pcu.c
index 64fc1eb9b6d..aefe84f9c04 100644
--- a/drivers/net/wireless/ath/ath5k/pcu.c
+++ b/drivers/net/wireless/ath/ath5k/pcu.c
@@ -187,8 +187,8 @@ unsigned int ath5k_hw_get_ack_timeout(struct ath5k_hw *ah)
187{ 187{
188 ATH5K_TRACE(ah->ah_sc); 188 ATH5K_TRACE(ah->ah_sc);
189 189
190 return ath5k_hw_clocktoh(AR5K_REG_MS(ath5k_hw_reg_read(ah, 190 return ath5k_hw_clocktoh(ah, AR5K_REG_MS(ath5k_hw_reg_read(ah,
191 AR5K_TIME_OUT), AR5K_TIME_OUT_ACK), ah->ah_turbo); 191 AR5K_TIME_OUT), AR5K_TIME_OUT_ACK));
192} 192}
193 193
194/** 194/**
@@ -200,12 +200,12 @@ unsigned int ath5k_hw_get_ack_timeout(struct ath5k_hw *ah)
200int ath5k_hw_set_ack_timeout(struct ath5k_hw *ah, unsigned int timeout) 200int ath5k_hw_set_ack_timeout(struct ath5k_hw *ah, unsigned int timeout)
201{ 201{
202 ATH5K_TRACE(ah->ah_sc); 202 ATH5K_TRACE(ah->ah_sc);
203 if (ath5k_hw_clocktoh(AR5K_REG_MS(0xffffffff, AR5K_TIME_OUT_ACK), 203 if (ath5k_hw_clocktoh(ah, AR5K_REG_MS(0xffffffff, AR5K_TIME_OUT_ACK))
204 ah->ah_turbo) <= timeout) 204 <= timeout)
205 return -EINVAL; 205 return -EINVAL;
206 206
207 AR5K_REG_WRITE_BITS(ah, AR5K_TIME_OUT, AR5K_TIME_OUT_ACK, 207 AR5K_REG_WRITE_BITS(ah, AR5K_TIME_OUT, AR5K_TIME_OUT_ACK,
208 ath5k_hw_htoclock(timeout, ah->ah_turbo)); 208 ath5k_hw_htoclock(ah, timeout));
209 209
210 return 0; 210 return 0;
211} 211}
@@ -218,8 +218,8 @@ int ath5k_hw_set_ack_timeout(struct ath5k_hw *ah, unsigned int timeout)
218unsigned int ath5k_hw_get_cts_timeout(struct ath5k_hw *ah) 218unsigned int ath5k_hw_get_cts_timeout(struct ath5k_hw *ah)
219{ 219{
220 ATH5K_TRACE(ah->ah_sc); 220 ATH5K_TRACE(ah->ah_sc);
221 return ath5k_hw_clocktoh(AR5K_REG_MS(ath5k_hw_reg_read(ah, 221 return ath5k_hw_clocktoh(ah, AR5K_REG_MS(ath5k_hw_reg_read(ah,
222 AR5K_TIME_OUT), AR5K_TIME_OUT_CTS), ah->ah_turbo); 222 AR5K_TIME_OUT), AR5K_TIME_OUT_CTS));
223} 223}
224 224
225/** 225/**
@@ -231,17 +231,97 @@ unsigned int ath5k_hw_get_cts_timeout(struct ath5k_hw *ah)
231int ath5k_hw_set_cts_timeout(struct ath5k_hw *ah, unsigned int timeout) 231int ath5k_hw_set_cts_timeout(struct ath5k_hw *ah, unsigned int timeout)
232{ 232{
233 ATH5K_TRACE(ah->ah_sc); 233 ATH5K_TRACE(ah->ah_sc);
234 if (ath5k_hw_clocktoh(AR5K_REG_MS(0xffffffff, AR5K_TIME_OUT_CTS), 234 if (ath5k_hw_clocktoh(ah, AR5K_REG_MS(0xffffffff, AR5K_TIME_OUT_CTS))
235 ah->ah_turbo) <= timeout) 235 <= timeout)
236 return -EINVAL; 236 return -EINVAL;
237 237
238 AR5K_REG_WRITE_BITS(ah, AR5K_TIME_OUT, AR5K_TIME_OUT_CTS, 238 AR5K_REG_WRITE_BITS(ah, AR5K_TIME_OUT, AR5K_TIME_OUT_CTS,
239 ath5k_hw_htoclock(timeout, ah->ah_turbo)); 239 ath5k_hw_htoclock(ah, timeout));
240 240
241 return 0; 241 return 0;
242} 242}
243 243
244/** 244/**
245 * ath5k_hw_htoclock - Translate usec to hw clock units
246 *
247 * @ah: The &struct ath5k_hw
248 * @usec: value in microseconds
249 */
250unsigned int ath5k_hw_htoclock(struct ath5k_hw *ah, unsigned int usec)
251{
252 return usec * ath5k_hw_get_clockrate(ah);
253}
254
255/**
256 * ath5k_hw_clocktoh - Translate hw clock units to usec
257 * @clock: value in hw clock units
258 */
259unsigned int ath5k_hw_clocktoh(struct ath5k_hw *ah, unsigned int clock)
260{
261 return clock / ath5k_hw_get_clockrate(ah);
262}
263
264/**
265 * ath5k_hw_get_clockrate - Get the clock rate for current mode
266 *
267 * @ah: The &struct ath5k_hw
268 */
269unsigned int ath5k_hw_get_clockrate(struct ath5k_hw *ah)
270{
271 struct ieee80211_channel *channel = ah->ah_current_channel;
272 int clock;
273
274 if (channel->hw_value & CHANNEL_5GHZ)
275 clock = 40; /* 802.11a */
276 else if (channel->hw_value & CHANNEL_CCK)
277 clock = 22; /* 802.11b */
278 else
279 clock = 44; /* 802.11g */
280
281 /* Clock rate in turbo modes is twice the normal rate */
282 if (channel->hw_value & CHANNEL_TURBO)
283 clock *= 2;
284
285 return clock;
286}
287
288/**
289 * ath5k_hw_get_default_slottime - Get the default slot time for current mode
290 *
291 * @ah: The &struct ath5k_hw
292 */
293unsigned int ath5k_hw_get_default_slottime(struct ath5k_hw *ah)
294{
295 struct ieee80211_channel *channel = ah->ah_current_channel;
296
297 if (channel->hw_value & CHANNEL_TURBO)
298 return 6; /* both turbo modes */
299
300 if (channel->hw_value & CHANNEL_CCK)
301 return 20; /* 802.11b */
302
303 return 9; /* 802.11 a/g */
304}
305
306/**
307 * ath5k_hw_get_default_sifs - Get the default SIFS for current mode
308 *
309 * @ah: The &struct ath5k_hw
310 */
311unsigned int ath5k_hw_get_default_sifs(struct ath5k_hw *ah)
312{
313 struct ieee80211_channel *channel = ah->ah_current_channel;
314
315 if (channel->hw_value & CHANNEL_TURBO)
316 return 8; /* both turbo modes */
317
318 if (channel->hw_value & CHANNEL_5GHZ)
319 return 16; /* 802.11a */
320
321 return 10; /* 802.11 b/g */
322}
323
324/**
245 * ath5k_hw_set_lladdr - Set station id 325 * ath5k_hw_set_lladdr - Set station id
246 * 326 *
247 * @ah: The &struct ath5k_hw 327 * @ah: The &struct ath5k_hw
@@ -1050,3 +1130,24 @@ int ath5k_hw_set_key_lladdr(struct ath5k_hw *ah, u16 entry, const u8 *mac)
1050 return 0; 1130 return 0;
1051} 1131}
1052 1132
1133/**
1134 * ath5k_hw_set_coverage_class - Set IEEE 802.11 coverage class
1135 *
1136 * @ah: The &struct ath5k_hw
1137 * @coverage_class: IEEE 802.11 coverage class number
1138 *
1139 * Sets slot time, ACK timeout and CTS timeout for given coverage class.
1140 */
1141void ath5k_hw_set_coverage_class(struct ath5k_hw *ah, u8 coverage_class)
1142{
1143 /* As defined by IEEE 802.11-2007 17.3.8.6 */
1144 int slot_time = ath5k_hw_get_default_slottime(ah) + 3 * coverage_class;
1145 int ack_timeout = ath5k_hw_get_default_sifs(ah) + slot_time;
1146 int cts_timeout = ack_timeout;
1147
1148 ath5k_hw_set_slot_time(ah, slot_time);
1149 ath5k_hw_set_ack_timeout(ah, ack_timeout);
1150 ath5k_hw_set_cts_timeout(ah, cts_timeout);
1151
1152 ah->ah_coverage_class = coverage_class;
1153}
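
Worked example of the coverage-class arithmetic introduced above, using the per-mode defaults from ath5k_hw_get_default_slottime() and ath5k_hw_get_default_sifs(): on an 802.11a channel (default slot 9 us, SIFS 16 us) a coverage class of 3 gives

    slot_time   = 9 + 3 * 3              = 18 us
    ack_timeout = cts_timeout = 16 + 18  = 34 us

and with the 40 MHz clock ath5k_hw_get_clockrate() reports for 5 GHz channels, ath5k_hw_htoclock() converts that into 34 * 40 = 1360 clock units before the value reaches the timeout register.
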
diff --git a/drivers/net/wireless/ath/ath5k/qcu.c b/drivers/net/wireless/ath/ath5k/qcu.c
index eeebb9aef20..abe36c0d139 100644
--- a/drivers/net/wireless/ath/ath5k/qcu.c
+++ b/drivers/net/wireless/ath/ath5k/qcu.c
@@ -520,12 +520,16 @@ int ath5k_hw_reset_tx_queue(struct ath5k_hw *ah, unsigned int queue)
520 */ 520 */
521unsigned int ath5k_hw_get_slot_time(struct ath5k_hw *ah) 521unsigned int ath5k_hw_get_slot_time(struct ath5k_hw *ah)
522{ 522{
523 unsigned int slot_time_clock;
524
523 ATH5K_TRACE(ah->ah_sc); 525 ATH5K_TRACE(ah->ah_sc);
526
524 if (ah->ah_version == AR5K_AR5210) 527 if (ah->ah_version == AR5K_AR5210)
525 return ath5k_hw_clocktoh(ath5k_hw_reg_read(ah, 528 slot_time_clock = ath5k_hw_reg_read(ah, AR5K_SLOT_TIME);
526 AR5K_SLOT_TIME) & 0xffff, ah->ah_turbo);
527 else 529 else
528 return ath5k_hw_reg_read(ah, AR5K_DCU_GBL_IFS_SLOT) & 0xffff; 530 slot_time_clock = ath5k_hw_reg_read(ah, AR5K_DCU_GBL_IFS_SLOT);
531
532 return ath5k_hw_clocktoh(ah, slot_time_clock & 0xffff);
529} 533}
530 534
531/* 535/*
@@ -533,15 +537,17 @@ unsigned int ath5k_hw_get_slot_time(struct ath5k_hw *ah)
533 */ 537 */
534int ath5k_hw_set_slot_time(struct ath5k_hw *ah, unsigned int slot_time) 538int ath5k_hw_set_slot_time(struct ath5k_hw *ah, unsigned int slot_time)
535{ 539{
540 u32 slot_time_clock = ath5k_hw_htoclock(ah, slot_time);
541
536 ATH5K_TRACE(ah->ah_sc); 542 ATH5K_TRACE(ah->ah_sc);
537 if (slot_time < AR5K_SLOT_TIME_9 || slot_time > AR5K_SLOT_TIME_MAX) 543
544 if (slot_time < 6 || slot_time_clock > AR5K_SLOT_TIME_MAX)
538 return -EINVAL; 545 return -EINVAL;
539 546
540 if (ah->ah_version == AR5K_AR5210) 547 if (ah->ah_version == AR5K_AR5210)
541 ath5k_hw_reg_write(ah, ath5k_hw_htoclock(slot_time, 548 ath5k_hw_reg_write(ah, slot_time_clock, AR5K_SLOT_TIME);
542 ah->ah_turbo), AR5K_SLOT_TIME);
543 else 549 else
544 ath5k_hw_reg_write(ah, slot_time, AR5K_DCU_GBL_IFS_SLOT); 550 ath5k_hw_reg_write(ah, slot_time_clock, AR5K_DCU_GBL_IFS_SLOT);
545 551
546 return 0; 552 return 0;
547} 553}
diff --git a/drivers/net/wireless/ath/ath5k/reset.c b/drivers/net/wireless/ath/ath5k/reset.c
index 62954fc7786..6690923fd78 100644
--- a/drivers/net/wireless/ath/ath5k/reset.c
+++ b/drivers/net/wireless/ath/ath5k/reset.c
@@ -60,12 +60,11 @@ static inline int ath5k_hw_write_ofdm_timings(struct ath5k_hw *ah,
60 !(channel->hw_value & CHANNEL_OFDM)); 60 !(channel->hw_value & CHANNEL_OFDM));
61 61
62 /* Get coefficient 62 /* Get coefficient
63 * ALGO: coef = (5 * clock * carrier_freq) / 2) 63 * ALGO: coef = (5 * clock / carrier_freq) / 2
64 * we scale coef by shifting clock value by 24 for 64 * we scale coef by shifting clock value by 24 for
65 * better precision since we use integers */ 65 * better precision since we use integers */
66 /* TODO: Half/quarter rate */ 66 /* TODO: Half/quarter rate */
67 clock = ath5k_hw_htoclock(1, channel->hw_value & CHANNEL_TURBO); 67 clock = (channel->hw_value & CHANNEL_TURBO) ? 80 : 40;
68
69 coef_scaled = ((5 * (clock << 24)) / 2) / channel->center_freq; 68 coef_scaled = ((5 * (clock << 24)) / 2) / channel->center_freq;
70 69
71 /* Get exponent 70 /* Get exponent
@@ -1317,6 +1316,10 @@ int ath5k_hw_reset(struct ath5k_hw *ah, enum nl80211_iftype op_mode,
1317 /* Restore antenna mode */ 1316 /* Restore antenna mode */
1318 ath5k_hw_set_antenna_mode(ah, ah->ah_ant_mode); 1317 ath5k_hw_set_antenna_mode(ah, ah->ah_ant_mode);
1319 1318
1319 /* Restore slot time and ACK timeouts */
1320 if (ah->ah_coverage_class > 0)
1321 ath5k_hw_set_coverage_class(ah, ah->ah_coverage_class);
1322
1320 /* 1323 /*
1321 * Configure QCUs/DCUs 1324 * Configure QCUs/DCUs
1322 */ 1325 */
diff --git a/drivers/net/wireless/ath/ath9k/Makefile b/drivers/net/wireless/ath/ath9k/Makefile
index 4985b2b1b0a..6b50d5eb9ec 100644
--- a/drivers/net/wireless/ath/ath9k/Makefile
+++ b/drivers/net/wireless/ath/ath9k/Makefile
@@ -1,4 +1,6 @@
1ath9k-y += beacon.o \ 1ath9k-y += beacon.o \
2 gpio.o \
3 init.o \
2 main.o \ 4 main.o \
3 recv.o \ 5 recv.o \
4 xmit.o \ 6 xmit.o \
diff --git a/drivers/net/wireless/ath/ath9k/ahb.c b/drivers/net/wireless/ath/ath9k/ahb.c
index 329e6bc137a..9e62a569e81 100644
--- a/drivers/net/wireless/ath/ath9k/ahb.c
+++ b/drivers/net/wireless/ath/ath9k/ahb.c
@@ -121,16 +121,19 @@ static int ath_ahb_probe(struct platform_device *pdev)
121 sc->mem = mem; 121 sc->mem = mem;
122 sc->irq = irq; 122 sc->irq = irq;
123 123
124 ret = ath_init_device(AR5416_AR9100_DEVID, sc, 0x0, &ath_ahb_bus_ops); 124 /* Will be cleared in ath9k_start() */
125 sc->sc_flags |= SC_OP_INVALID;
126
127 ret = request_irq(irq, ath_isr, IRQF_SHARED, "ath9k", sc);
125 if (ret) { 128 if (ret) {
126 dev_err(&pdev->dev, "failed to initialize device\n"); 129 dev_err(&pdev->dev, "request_irq failed\n");
127 goto err_free_hw; 130 goto err_free_hw;
128 } 131 }
129 132
130 ret = request_irq(irq, ath_isr, IRQF_SHARED, "ath9k", sc); 133 ret = ath9k_init_device(AR5416_AR9100_DEVID, sc, 0x0, &ath_ahb_bus_ops);
131 if (ret) { 134 if (ret) {
132 dev_err(&pdev->dev, "request_irq failed\n"); 135 dev_err(&pdev->dev, "failed to initialize device\n");
133 goto err_detach; 136 goto err_irq;
134 } 137 }
135 138
136 ah = sc->sc_ah; 139 ah = sc->sc_ah;
@@ -143,8 +146,8 @@ static int ath_ahb_probe(struct platform_device *pdev)
143 146
144 return 0; 147 return 0;
145 148
146 err_detach: 149 err_irq:
147 ath_detach(sc); 150 free_irq(irq, sc);
148 err_free_hw: 151 err_free_hw:
149 ieee80211_free_hw(hw); 152 ieee80211_free_hw(hw);
150 platform_set_drvdata(pdev, NULL); 153 platform_set_drvdata(pdev, NULL);
@@ -161,8 +164,12 @@ static int ath_ahb_remove(struct platform_device *pdev)
161 if (hw) { 164 if (hw) {
162 struct ath_wiphy *aphy = hw->priv; 165 struct ath_wiphy *aphy = hw->priv;
163 struct ath_softc *sc = aphy->sc; 166 struct ath_softc *sc = aphy->sc;
167 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
164 168
165 ath_cleanup(sc); 169 ath9k_deinit_device(sc);
170 free_irq(sc->irq, sc);
171 ieee80211_free_hw(sc->hw);
172 ath_bus_cleanup(common);
166 platform_set_drvdata(pdev, NULL); 173 platform_set_drvdata(pdev, NULL);
167 } 174 }
168 175
diff --git a/drivers/net/wireless/ath/ath9k/ath9k.h b/drivers/net/wireless/ath/ath9k/ath9k.h
index 1597a42731e..bf3d4c4bfa5 100644
--- a/drivers/net/wireless/ath/ath9k/ath9k.h
+++ b/drivers/net/wireless/ath/ath9k/ath9k.h
@@ -341,6 +341,12 @@ int ath_beaconq_config(struct ath_softc *sc);
341#define ATH_LONG_CALINTERVAL 30000 /* 30 seconds */ 341#define ATH_LONG_CALINTERVAL 30000 /* 30 seconds */
342#define ATH_RESTART_CALINTERVAL 1200000 /* 20 minutes */ 342#define ATH_RESTART_CALINTERVAL 1200000 /* 20 minutes */
343 343
344void ath_ani_calibrate(unsigned long data);
345
346/**********/
347/* BTCOEX */
348/**********/
349
344/* Defines the BT AR_BT_COEX_WGHT used */ 350/* Defines the BT AR_BT_COEX_WGHT used */
345enum ath_stomp_type { 351enum ath_stomp_type {
346 ATH_BTCOEX_NO_STOMP, 352 ATH_BTCOEX_NO_STOMP,
@@ -361,6 +367,10 @@ struct ath_btcoex {
361 struct ath_gen_timer *no_stomp_timer; /* Timer for no BT stomping */ 367 struct ath_gen_timer *no_stomp_timer; /* Timer for no BT stomping */
362}; 368};
363 369
370int ath_init_btcoex_timer(struct ath_softc *sc);
371void ath9k_btcoex_timer_resume(struct ath_softc *sc);
372void ath9k_btcoex_timer_pause(struct ath_softc *sc);
373
364/********************/ 374/********************/
365/* LED Control */ 375/* LED Control */
366/********************/ 376/********************/
@@ -385,6 +395,9 @@ struct ath_led {
385 bool registered; 395 bool registered;
386}; 396};
387 397
398void ath_init_leds(struct ath_softc *sc);
399void ath_deinit_leds(struct ath_softc *sc);
400
388/********************/ 401/********************/
389/* Main driver core */ 402/* Main driver core */
390/********************/ 403/********************/
@@ -403,26 +416,28 @@ struct ath_led {
403#define ATH_TXPOWER_MAX 100 /* .5 dBm units */ 416#define ATH_TXPOWER_MAX 100 /* .5 dBm units */
404#define ATH_RATE_DUMMY_MARKER 0 417#define ATH_RATE_DUMMY_MARKER 0
405 418
406#define SC_OP_INVALID BIT(0) 419#define SC_OP_INVALID BIT(0)
407#define SC_OP_BEACONS BIT(1) 420#define SC_OP_BEACONS BIT(1)
408#define SC_OP_RXAGGR BIT(2) 421#define SC_OP_RXAGGR BIT(2)
409#define SC_OP_TXAGGR BIT(3) 422#define SC_OP_TXAGGR BIT(3)
410#define SC_OP_FULL_RESET BIT(4) 423#define SC_OP_FULL_RESET BIT(4)
411#define SC_OP_PREAMBLE_SHORT BIT(5) 424#define SC_OP_PREAMBLE_SHORT BIT(5)
412#define SC_OP_PROTECT_ENABLE BIT(6) 425#define SC_OP_PROTECT_ENABLE BIT(6)
413#define SC_OP_RXFLUSH BIT(7) 426#define SC_OP_RXFLUSH BIT(7)
414#define SC_OP_LED_ASSOCIATED BIT(8) 427#define SC_OP_LED_ASSOCIATED BIT(8)
415#define SC_OP_WAIT_FOR_BEACON BIT(12) 428#define SC_OP_LED_ON BIT(9)
416#define SC_OP_LED_ON BIT(13) 429#define SC_OP_SCANNING BIT(10)
417#define SC_OP_SCANNING BIT(14) 430#define SC_OP_TSF_RESET BIT(11)
418#define SC_OP_TSF_RESET BIT(15) 431#define SC_OP_BT_PRIORITY_DETECTED BIT(12)
419#define SC_OP_WAIT_FOR_CAB BIT(16) 432
420#define SC_OP_WAIT_FOR_PSPOLL_DATA BIT(17) 433/* Powersave flags */
421#define SC_OP_WAIT_FOR_TX_ACK BIT(18) 434#define PS_WAIT_FOR_BEACON BIT(0)
422#define SC_OP_BEACON_SYNC BIT(19) 435#define PS_WAIT_FOR_CAB BIT(1)
423#define SC_OP_BT_PRIORITY_DETECTED BIT(21) 436#define PS_WAIT_FOR_PSPOLL_DATA BIT(2)
424#define SC_OP_NULLFUNC_COMPLETED BIT(22) 437#define PS_WAIT_FOR_TX_ACK BIT(3)
425#define SC_OP_PS_ENABLED BIT(23) 438#define PS_BEACON_SYNC BIT(4)
439#define PS_NULLFUNC_COMPLETED BIT(5)
440#define PS_ENABLED BIT(6)
426 441
427struct ath_wiphy; 442struct ath_wiphy;
428struct ath_rate_table; 443struct ath_rate_table;
@@ -453,12 +468,12 @@ struct ath_softc {
453 int irq; 468 int irq;
454 spinlock_t sc_resetlock; 469 spinlock_t sc_resetlock;
455 spinlock_t sc_serial_rw; 470 spinlock_t sc_serial_rw;
456 spinlock_t ani_lock;
457 spinlock_t sc_pm_lock; 471 spinlock_t sc_pm_lock;
458 struct mutex mutex; 472 struct mutex mutex;
459 473
460 u32 intrstatus; 474 u32 intrstatus;
461 u32 sc_flags; /* SC_OP_* */ 475 u32 sc_flags; /* SC_OP_* */
476 u16 ps_flags; /* PS_* */
462 u16 curtxpow; 477 u16 curtxpow;
463 u8 nbcnvifs; 478 u8 nbcnvifs;
464 u16 nvifs; 479 u16 nvifs;
@@ -509,6 +524,7 @@ struct ath_wiphy {
509 int chan_is_ht; 524 int chan_is_ht;
510}; 525};
511 526
527void ath9k_tasklet(unsigned long data);
512int ath_reset(struct ath_softc *sc, bool retry_tx); 528int ath_reset(struct ath_softc *sc, bool retry_tx);
513int ath_get_hal_qnum(u16 queue, struct ath_softc *sc); 529int ath_get_hal_qnum(u16 queue, struct ath_softc *sc);
514int ath_get_mac80211_qnum(u32 queue, struct ath_softc *sc); 530int ath_get_mac80211_qnum(u32 queue, struct ath_softc *sc);
@@ -525,15 +541,15 @@ static inline void ath_bus_cleanup(struct ath_common *common)
525} 541}
526 542
527extern struct ieee80211_ops ath9k_ops; 543extern struct ieee80211_ops ath9k_ops;
544extern int modparam_nohwcrypt;
528 545
529irqreturn_t ath_isr(int irq, void *dev); 546irqreturn_t ath_isr(int irq, void *dev);
530void ath_cleanup(struct ath_softc *sc); 547int ath9k_init_device(u16 devid, struct ath_softc *sc, u16 subsysid,
531int ath_init_device(u16 devid, struct ath_softc *sc, u16 subsysid,
532 const struct ath_bus_ops *bus_ops); 548 const struct ath_bus_ops *bus_ops);
533void ath_detach(struct ath_softc *sc); 549void ath9k_deinit_device(struct ath_softc *sc);
534const char *ath_mac_bb_name(u32 mac_bb_version); 550const char *ath_mac_bb_name(u32 mac_bb_version);
535const char *ath_rf_name(u16 rf_version); 551const char *ath_rf_name(u16 rf_version);
536void ath_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw); 552void ath9k_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw);
537void ath9k_update_ichannel(struct ath_softc *sc, struct ieee80211_hw *hw, 553void ath9k_update_ichannel(struct ath_softc *sc, struct ieee80211_hw *hw,
538 struct ath9k_channel *ichan); 554 struct ath9k_channel *ichan);
539void ath_update_chainmask(struct ath_softc *sc, int is_ht); 555void ath_update_chainmask(struct ath_softc *sc, int is_ht);
@@ -542,6 +558,7 @@ int ath_set_channel(struct ath_softc *sc, struct ieee80211_hw *hw,
542 558
543void ath_radio_enable(struct ath_softc *sc, struct ieee80211_hw *hw); 559void ath_radio_enable(struct ath_softc *sc, struct ieee80211_hw *hw);
544void ath_radio_disable(struct ath_softc *sc, struct ieee80211_hw *hw); 560void ath_radio_disable(struct ath_softc *sc, struct ieee80211_hw *hw);
561bool ath9k_setpower(struct ath_softc *sc, enum ath9k_power_mode mode);
545 562
546#ifdef CONFIG_PCI 563#ifdef CONFIG_PCI
547int ath_pci_init(void); 564int ath_pci_init(void);
@@ -583,4 +600,8 @@ void ath_mac80211_stop_queue(struct ath_softc *sc, u16 skb_queue);
583void ath_mac80211_start_queue(struct ath_softc *sc, u16 skb_queue); 600void ath_mac80211_start_queue(struct ath_softc *sc, u16 skb_queue);
584 601
585int ath_tx_get_qnum(struct ath_softc *sc, int qtype, int haltype); 602int ath_tx_get_qnum(struct ath_softc *sc, int qtype, int haltype);
603
604void ath_start_rfkill_poll(struct ath_softc *sc);
605extern void ath9k_rfkill_poll_state(struct ieee80211_hw *hw);
606
586#endif /* ATH9K_H */ 607#endif /* ATH9K_H */
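
A quick note on the flag rework in this header: operational state stays in sc_flags (SC_OP_*), while the powersave wait states move into the new 16-bit ps_flags word (PS_*). A minimal illustration of how those bits are set, tested, and cleared (the field and macro names come from the hunk above; the wrapping function is hypothetical):

/* Hypothetical helper showing typical ps_flags bit handling. */
static void example_ps_flags(struct ath_softc *sc)
{
	/* Ask to stay awake until the next beacon arrives. */
	sc->ps_flags |= PS_WAIT_FOR_BEACON;

	/* Once the beacon has been processed... */
	if (sc->ps_flags & PS_WAIT_FOR_BEACON) {
		sc->ps_flags &= ~PS_WAIT_FOR_BEACON;
		sc->ps_flags |= PS_BEACON_SYNC;
	}

	/* Non-powersave state remains in sc_flags, e.g. scanning. */
	if (sc->sc_flags & SC_OP_SCANNING)
		return;
}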
diff --git a/drivers/net/wireless/ath/ath9k/beacon.c b/drivers/net/wireless/ath/ath9k/beacon.c
index 1660ef17aaf..422454fe4ff 100644
--- a/drivers/net/wireless/ath/ath9k/beacon.c
+++ b/drivers/net/wireless/ath/ath9k/beacon.c
@@ -480,7 +480,8 @@ void ath_beacon_tasklet(unsigned long data)
480 sc->beacon.updateslot = COMMIT; /* commit next beacon */ 480 sc->beacon.updateslot = COMMIT; /* commit next beacon */
481 sc->beacon.slotupdate = slot; 481 sc->beacon.slotupdate = slot;
482 } else if (sc->beacon.updateslot == COMMIT && sc->beacon.slotupdate == slot) { 482 } else if (sc->beacon.updateslot == COMMIT && sc->beacon.slotupdate == slot) {
483 ath9k_hw_setslottime(sc->sc_ah, sc->beacon.slottime); 483 ah->slottime = sc->beacon.slottime;
484 ath9k_hw_init_global_settings(ah);
484 sc->beacon.updateslot = OK; 485 sc->beacon.updateslot = OK;
485 } 486 }
486 if (bfaddr != 0) { 487 if (bfaddr != 0) {
diff --git a/drivers/net/wireless/ath/ath9k/debug.c b/drivers/net/wireless/ath/ath9k/debug.c
index b66f72dbf7b..9489b6b25b5 100644
--- a/drivers/net/wireless/ath/ath9k/debug.c
+++ b/drivers/net/wireless/ath/ath9k/debug.c
@@ -289,23 +289,49 @@ static ssize_t read_file_rcstat(struct file *file, char __user *user_buf,
289 if (sc->cur_rate_table == NULL) 289 if (sc->cur_rate_table == NULL)
290 return 0; 290 return 0;
291 291
292 max = 80 + sc->cur_rate_table->rate_cnt * 64; 292 max = 80 + sc->cur_rate_table->rate_cnt * 1024;
293 buf = kmalloc(max + 1, GFP_KERNEL); 293 buf = kmalloc(max + 1, GFP_KERNEL);
294 if (buf == NULL) 294 if (buf == NULL)
295 return 0; 295 return 0;
296 buf[max] = 0; 296 buf[max] = 0;
297 297
298 len += sprintf(buf, "%5s %15s %8s %9s %3s\n\n", "Rate", "Success", 298 len += sprintf(buf, "%6s %6s %6s "
299 "Retries", "XRetries", "PER"); 299 "%10s %10s %10s %10s\n",
300 "HT", "MCS", "Rate",
301 "Success", "Retries", "XRetries", "PER");
300 302
301 for (i = 0; i < sc->cur_rate_table->rate_cnt; i++) { 303 for (i = 0; i < sc->cur_rate_table->rate_cnt; i++) {
302 u32 ratekbps = sc->cur_rate_table->info[i].ratekbps; 304 u32 ratekbps = sc->cur_rate_table->info[i].ratekbps;
303 struct ath_rc_stats *stats = &sc->debug.stats.rcstats[i]; 305 struct ath_rc_stats *stats = &sc->debug.stats.rcstats[i];
306 char mcs[5];
307 char htmode[5];
308 int used_mcs = 0, used_htmode = 0;
309
310 if (WLAN_RC_PHY_HT(sc->cur_rate_table->info[i].phy)) {
311 used_mcs = snprintf(mcs, 5, "%d",
312 sc->cur_rate_table->info[i].ratecode);
313
314 if (WLAN_RC_PHY_40(sc->cur_rate_table->info[i].phy))
315 used_htmode = snprintf(htmode, 5, "HT40");
316 else if (WLAN_RC_PHY_20(sc->cur_rate_table->info[i].phy))
317 used_htmode = snprintf(htmode, 5, "HT20");
318 else
319 used_htmode = snprintf(htmode, 5, "????");
320 }
321
322 mcs[used_mcs] = '\0';
323 htmode[used_htmode] = '\0';
304 324
305 len += snprintf(buf + len, max - len, 325 len += snprintf(buf + len, max - len,
306 "%3u.%d: %8u %8u %8u %8u\n", ratekbps / 1000, 326 "%6s %6s %3u.%d: "
307 (ratekbps % 1000) / 100, stats->success, 327 "%10u %10u %10u %10u\n",
308 stats->retries, stats->xretries, 328 htmode,
329 mcs,
330 ratekbps / 1000,
331 (ratekbps % 1000) / 100,
332 stats->success,
333 stats->retries,
334 stats->xretries,
309 stats->per); 335 stats->per);
310 } 336 }
311 337
@@ -554,6 +580,116 @@ static const struct file_operations fops_xmit = {
554 .owner = THIS_MODULE 580 .owner = THIS_MODULE
555}; 581};
556 582
583static ssize_t read_file_recv(struct file *file, char __user *user_buf,
584 size_t count, loff_t *ppos)
585{
586#define PHY_ERR(s, p) \
587 len += snprintf(buf + len, size - len, "%18s : %10u\n", s, \
588 sc->debug.stats.rxstats.phy_err_stats[p]);
589
590 struct ath_softc *sc = file->private_data;
591 char *buf;
592 unsigned int len = 0, size = 1152;
593 ssize_t retval = 0;
594
595 buf = kzalloc(size, GFP_KERNEL);
596 if (buf == NULL)
597 return 0;
598
599 len += snprintf(buf + len, size - len,
600 "%18s : %10u\n", "CRC ERR",
601 sc->debug.stats.rxstats.crc_err);
602 len += snprintf(buf + len, size - len,
603 "%18s : %10u\n", "DECRYPT CRC ERR",
604 sc->debug.stats.rxstats.decrypt_crc_err);
605 len += snprintf(buf + len, size - len,
606 "%18s : %10u\n", "PHY ERR",
607 sc->debug.stats.rxstats.phy_err);
608 len += snprintf(buf + len, size - len,
609 "%18s : %10u\n", "MIC ERR",
610 sc->debug.stats.rxstats.mic_err);
611 len += snprintf(buf + len, size - len,
612 "%18s : %10u\n", "PRE-DELIM CRC ERR",
613 sc->debug.stats.rxstats.pre_delim_crc_err);
614 len += snprintf(buf + len, size - len,
615 "%18s : %10u\n", "POST-DELIM CRC ERR",
616 sc->debug.stats.rxstats.post_delim_crc_err);
617 len += snprintf(buf + len, size - len,
618 "%18s : %10u\n", "DECRYPT BUSY ERR",
619 sc->debug.stats.rxstats.decrypt_busy_err);
620
621 PHY_ERR("UNDERRUN", ATH9K_PHYERR_UNDERRUN);
622 PHY_ERR("TIMING", ATH9K_PHYERR_TIMING);
623 PHY_ERR("PARITY", ATH9K_PHYERR_PARITY);
624 PHY_ERR("RATE", ATH9K_PHYERR_RATE);
625 PHY_ERR("LENGTH", ATH9K_PHYERR_LENGTH);
626 PHY_ERR("RADAR", ATH9K_PHYERR_RADAR);
627 PHY_ERR("SERVICE", ATH9K_PHYERR_SERVICE);
628 PHY_ERR("TOR", ATH9K_PHYERR_TOR);
629 PHY_ERR("OFDM-TIMING", ATH9K_PHYERR_OFDM_TIMING);
630 PHY_ERR("OFDM-SIGNAL-PARITY", ATH9K_PHYERR_OFDM_SIGNAL_PARITY);
631 PHY_ERR("OFDM-RATE", ATH9K_PHYERR_OFDM_RATE_ILLEGAL);
632 PHY_ERR("OFDM-LENGTH", ATH9K_PHYERR_OFDM_LENGTH_ILLEGAL);
633 PHY_ERR("OFDM-POWER-DROP", ATH9K_PHYERR_OFDM_POWER_DROP);
634 PHY_ERR("OFDM-SERVICE", ATH9K_PHYERR_OFDM_SERVICE);
635 PHY_ERR("OFDM-RESTART", ATH9K_PHYERR_OFDM_RESTART);
636 PHY_ERR("FALSE-RADAR-EXT", ATH9K_PHYERR_FALSE_RADAR_EXT);
637 PHY_ERR("CCK-TIMING", ATH9K_PHYERR_CCK_TIMING);
638 PHY_ERR("CCK-HEADER-CRC", ATH9K_PHYERR_CCK_HEADER_CRC);
639 PHY_ERR("CCK-RATE", ATH9K_PHYERR_CCK_RATE_ILLEGAL);
640 PHY_ERR("CCK-SERVICE", ATH9K_PHYERR_CCK_SERVICE);
641 PHY_ERR("CCK-RESTART", ATH9K_PHYERR_CCK_RESTART);
642 PHY_ERR("CCK-LENGTH", ATH9K_PHYERR_CCK_LENGTH_ILLEGAL);
643 PHY_ERR("CCK-POWER-DROP", ATH9K_PHYERR_CCK_POWER_DROP);
644 PHY_ERR("HT-CRC", ATH9K_PHYERR_HT_CRC_ERROR);
645 PHY_ERR("HT-LENGTH", ATH9K_PHYERR_HT_LENGTH_ILLEGAL);
646 PHY_ERR("HT-RATE", ATH9K_PHYERR_HT_RATE_ILLEGAL);
647
648 retval = simple_read_from_buffer(user_buf, count, ppos, buf, len);
649 kfree(buf);
650
651 return retval;
652
653#undef PHY_ERR
654}
655
656void ath_debug_stat_rx(struct ath_softc *sc, struct ath_buf *bf)
657{
658#define RX_STAT_INC(c) sc->debug.stats.rxstats.c++
659#define RX_PHY_ERR_INC(c) sc->debug.stats.rxstats.phy_err_stats[c]++
660
661 struct ath_desc *ds = bf->bf_desc;
662 u32 phyerr;
663
664 if (ds->ds_rxstat.rs_status & ATH9K_RXERR_CRC)
665 RX_STAT_INC(crc_err);
666 if (ds->ds_rxstat.rs_status & ATH9K_RXERR_DECRYPT)
667 RX_STAT_INC(decrypt_crc_err);
668 if (ds->ds_rxstat.rs_status & ATH9K_RXERR_MIC)
669 RX_STAT_INC(mic_err);
670 if (ds->ds_rxstat.rs_status & ATH9K_RX_DELIM_CRC_PRE)
671 RX_STAT_INC(pre_delim_crc_err);
672 if (ds->ds_rxstat.rs_status & ATH9K_RX_DELIM_CRC_POST)
673 RX_STAT_INC(post_delim_crc_err);
674 if (ds->ds_rxstat.rs_status & ATH9K_RX_DECRYPT_BUSY)
675 RX_STAT_INC(decrypt_busy_err);
676
677 if (ds->ds_rxstat.rs_status & ATH9K_RXERR_PHY) {
678 RX_STAT_INC(phy_err);
679 phyerr = ds->ds_rxstat.rs_phyerr & 0x24;
680 RX_PHY_ERR_INC(phyerr);
681 }
682
683#undef RX_STAT_INC
684#undef RX_PHY_ERR_INC
685}
686
687static const struct file_operations fops_recv = {
688 .read = read_file_recv,
689 .open = ath9k_debugfs_open,
690 .owner = THIS_MODULE
691};
692
557int ath9k_init_debug(struct ath_hw *ah) 693int ath9k_init_debug(struct ath_hw *ah)
558{ 694{
559 struct ath_common *common = ath9k_hw_common(ah); 695 struct ath_common *common = ath9k_hw_common(ah);
@@ -606,6 +742,13 @@ int ath9k_init_debug(struct ath_hw *ah)
606 if (!sc->debug.debugfs_xmit) 742 if (!sc->debug.debugfs_xmit)
607 goto err; 743 goto err;
608 744
745 sc->debug.debugfs_recv = debugfs_create_file("recv",
746 S_IRUSR,
747 sc->debug.debugfs_phy,
748 sc, &fops_recv);
749 if (!sc->debug.debugfs_recv)
750 goto err;
751
609 return 0; 752 return 0;
610err: 753err:
611 ath9k_exit_debug(ah); 754 ath9k_exit_debug(ah);
@@ -617,6 +760,7 @@ void ath9k_exit_debug(struct ath_hw *ah)
617 struct ath_common *common = ath9k_hw_common(ah); 760 struct ath_common *common = ath9k_hw_common(ah);
618 struct ath_softc *sc = (struct ath_softc *) common->priv; 761 struct ath_softc *sc = (struct ath_softc *) common->priv;
619 762
763 debugfs_remove(sc->debug.debugfs_recv);
620 debugfs_remove(sc->debug.debugfs_xmit); 764 debugfs_remove(sc->debug.debugfs_xmit);
621 debugfs_remove(sc->debug.debugfs_wiphy); 765 debugfs_remove(sc->debug.debugfs_wiphy);
622 debugfs_remove(sc->debug.debugfs_rcstat); 766 debugfs_remove(sc->debug.debugfs_rcstat);
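
For reference, the recv file added above is assembled with the PHY_ERR() helper macro; a single invocation such as PHY_ERR("OFDM-TIMING", ATH9K_PHYERR_OFDM_TIMING) expands to roughly the following, using the format string from the hunk (illustration only):

len += snprintf(buf + len, size - len, "%18s : %10u\n", "OFDM-TIMING",
		sc->debug.stats.rxstats.phy_err_stats[ATH9K_PHYERR_OFDM_TIMING]);

Once a kernel with this change is running, the counters can be read from the per-wiphy ath9k debugfs directory (typically somewhere under /sys/kernel/debug/ath9k/, the exact path depending on where debugfs is mounted and on the wiphy name).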
diff --git a/drivers/net/wireless/ath/ath9k/debug.h b/drivers/net/wireless/ath/ath9k/debug.h
index 536663e3ee1..86780e68b31 100644
--- a/drivers/net/wireless/ath/ath9k/debug.h
+++ b/drivers/net/wireless/ath/ath9k/debug.h
@@ -116,10 +116,35 @@ struct ath_tx_stats {
116 u32 delim_underrun; 116 u32 delim_underrun;
117}; 117};
118 118
119/**
120 * struct ath_rx_stats - RX Statistics
121 * @crc_err: No. of frames with incorrect CRC value
122 * @decrypt_crc_err: No. of frames whose CRC check failed after
123 decryption process completed
124 * @phy_err: No. of frames whose reception failed because the PHY
125 encountered an error
126 * @mic_err: No. of frames that failed TKIP MIC verification
127 * @pre_delim_crc_err: Pre-Frame delimiter CRC error detections
128 * @post_delim_crc_err: Post-Frame delimiter CRC error detections
129 * @decrypt_busy_err: Decryption interruptions counter
130 * @phy_err_stats: Individual PHY error statistics
131 */
132struct ath_rx_stats {
133 u32 crc_err;
134 u32 decrypt_crc_err;
135 u32 phy_err;
136 u32 mic_err;
137 u32 pre_delim_crc_err;
138 u32 post_delim_crc_err;
139 u32 decrypt_busy_err;
140 u32 phy_err_stats[ATH9K_PHYERR_MAX];
141};
142
119struct ath_stats { 143struct ath_stats {
120 struct ath_interrupt_stats istats; 144 struct ath_interrupt_stats istats;
121 struct ath_rc_stats rcstats[RATE_TABLE_SIZE]; 145 struct ath_rc_stats rcstats[RATE_TABLE_SIZE];
122 struct ath_tx_stats txstats[ATH9K_NUM_TX_QUEUES]; 146 struct ath_tx_stats txstats[ATH9K_NUM_TX_QUEUES];
147 struct ath_rx_stats rxstats;
123}; 148};
124 149
125struct ath9k_debug { 150struct ath9k_debug {
@@ -130,6 +155,7 @@ struct ath9k_debug {
130 struct dentry *debugfs_rcstat; 155 struct dentry *debugfs_rcstat;
131 struct dentry *debugfs_wiphy; 156 struct dentry *debugfs_wiphy;
132 struct dentry *debugfs_xmit; 157 struct dentry *debugfs_xmit;
158 struct dentry *debugfs_recv;
133 struct ath_stats stats; 159 struct ath_stats stats;
134}; 160};
135 161
@@ -142,6 +168,7 @@ void ath_debug_stat_interrupt(struct ath_softc *sc, enum ath9k_int status);
142void ath_debug_stat_rc(struct ath_softc *sc, int final_rate); 168void ath_debug_stat_rc(struct ath_softc *sc, int final_rate);
143void ath_debug_stat_tx(struct ath_softc *sc, struct ath_txq *txq, 169void ath_debug_stat_tx(struct ath_softc *sc, struct ath_txq *txq,
144 struct ath_buf *bf); 170 struct ath_buf *bf);
171void ath_debug_stat_rx(struct ath_softc *sc, struct ath_buf *bf);
145void ath_debug_stat_retries(struct ath_softc *sc, int rix, 172void ath_debug_stat_retries(struct ath_softc *sc, int rix,
146 int xretries, int retries, u8 per); 173 int xretries, int retries, u8 per);
147 174
@@ -181,6 +208,11 @@ static inline void ath_debug_stat_tx(struct ath_softc *sc,
181{ 208{
182} 209}
183 210
211static inline void ath_debug_stat_rx(struct ath_softc *sc,
212 struct ath_buf *bf)
213{
214}
215
184static inline void ath_debug_stat_retries(struct ath_softc *sc, int rix, 216static inline void ath_debug_stat_retries(struct ath_softc *sc, int rix,
185 int xretries, int retries, u8 per) 217 int xretries, int retries, u8 per)
186{ 218{
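
As elsewhere in this header, the new ath_debug_stat_rx() hook gets an empty static inline stub when debug support is compiled out, so call sites need no #ifdefs of their own. A minimal sketch of that pattern (the CONFIG symbol and function name here are placeholders, not the driver's real ones):

#ifdef CONFIG_EXAMPLE_DEBUG
void example_stat_rx(struct ath_softc *sc, struct ath_buf *bf);
#else
static inline void example_stat_rx(struct ath_softc *sc,
				   struct ath_buf *bf)
{
	/* No-op when debugging is compiled out; the call compiles away. */
}
#endif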
diff --git a/drivers/net/wireless/ath/ath9k/gpio.c b/drivers/net/wireless/ath/ath9k/gpio.c
new file mode 100644
index 00000000000..e204bd25ff6
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/gpio.c
@@ -0,0 +1,428 @@
1/*
2 * Copyright (c) 2008-2009 Atheros Communications Inc.
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
17#include "ath9k.h"
18
19/********************************/
20/* LED functions */
21/********************************/
22
23static void ath_led_blink_work(struct work_struct *work)
24{
25 struct ath_softc *sc = container_of(work, struct ath_softc,
26 ath_led_blink_work.work);
27
28 if (!(sc->sc_flags & SC_OP_LED_ASSOCIATED))
29 return;
30
31 if ((sc->led_on_duration == ATH_LED_ON_DURATION_IDLE) ||
32 (sc->led_off_duration == ATH_LED_OFF_DURATION_IDLE))
33 ath9k_hw_set_gpio(sc->sc_ah, sc->sc_ah->led_pin, 0);
34 else
35 ath9k_hw_set_gpio(sc->sc_ah, sc->sc_ah->led_pin,
36 (sc->sc_flags & SC_OP_LED_ON) ? 1 : 0);
37
38 ieee80211_queue_delayed_work(sc->hw,
39 &sc->ath_led_blink_work,
40 (sc->sc_flags & SC_OP_LED_ON) ?
41 msecs_to_jiffies(sc->led_off_duration) :
42 msecs_to_jiffies(sc->led_on_duration));
43
44 sc->led_on_duration = sc->led_on_cnt ?
45 max((ATH_LED_ON_DURATION_IDLE - sc->led_on_cnt), 25) :
46 ATH_LED_ON_DURATION_IDLE;
47 sc->led_off_duration = sc->led_off_cnt ?
48 max((ATH_LED_OFF_DURATION_IDLE - sc->led_off_cnt), 10) :
49 ATH_LED_OFF_DURATION_IDLE;
50 sc->led_on_cnt = sc->led_off_cnt = 0;
51 if (sc->sc_flags & SC_OP_LED_ON)
52 sc->sc_flags &= ~SC_OP_LED_ON;
53 else
54 sc->sc_flags |= SC_OP_LED_ON;
55}
56
57static void ath_led_brightness(struct led_classdev *led_cdev,
58 enum led_brightness brightness)
59{
60 struct ath_led *led = container_of(led_cdev, struct ath_led, led_cdev);
61 struct ath_softc *sc = led->sc;
62
63 switch (brightness) {
64 case LED_OFF:
65 if (led->led_type == ATH_LED_ASSOC ||
66 led->led_type == ATH_LED_RADIO) {
67 ath9k_hw_set_gpio(sc->sc_ah, sc->sc_ah->led_pin,
68 (led->led_type == ATH_LED_RADIO));
69 sc->sc_flags &= ~SC_OP_LED_ASSOCIATED;
70 if (led->led_type == ATH_LED_RADIO)
71 sc->sc_flags &= ~SC_OP_LED_ON;
72 } else {
73 sc->led_off_cnt++;
74 }
75 break;
76 case LED_FULL:
77 if (led->led_type == ATH_LED_ASSOC) {
78 sc->sc_flags |= SC_OP_LED_ASSOCIATED;
79 ieee80211_queue_delayed_work(sc->hw,
80 &sc->ath_led_blink_work, 0);
81 } else if (led->led_type == ATH_LED_RADIO) {
82 ath9k_hw_set_gpio(sc->sc_ah, sc->sc_ah->led_pin, 0);
83 sc->sc_flags |= SC_OP_LED_ON;
84 } else {
85 sc->led_on_cnt++;
86 }
87 break;
88 default:
89 break;
90 }
91}
92
93static int ath_register_led(struct ath_softc *sc, struct ath_led *led,
94 char *trigger)
95{
96 int ret;
97
98 led->sc = sc;
99 led->led_cdev.name = led->name;
100 led->led_cdev.default_trigger = trigger;
101 led->led_cdev.brightness_set = ath_led_brightness;
102
103 ret = led_classdev_register(wiphy_dev(sc->hw->wiphy), &led->led_cdev);
104 if (ret)
105 ath_print(ath9k_hw_common(sc->sc_ah), ATH_DBG_FATAL,
106 "Failed to register led:%s", led->name);
107 else
108 led->registered = 1;
109 return ret;
110}
111
112static void ath_unregister_led(struct ath_led *led)
113{
114 if (led->registered) {
115 led_classdev_unregister(&led->led_cdev);
116 led->registered = 0;
117 }
118}
119
120void ath_deinit_leds(struct ath_softc *sc)
121{
122 ath_unregister_led(&sc->assoc_led);
123 sc->sc_flags &= ~SC_OP_LED_ASSOCIATED;
124 ath_unregister_led(&sc->tx_led);
125 ath_unregister_led(&sc->rx_led);
126 ath_unregister_led(&sc->radio_led);
127 ath9k_hw_set_gpio(sc->sc_ah, sc->sc_ah->led_pin, 1);
128}
129
130void ath_init_leds(struct ath_softc *sc)
131{
132 char *trigger;
133 int ret;
134
135 if (AR_SREV_9287(sc->sc_ah))
136 sc->sc_ah->led_pin = ATH_LED_PIN_9287;
137 else
138 sc->sc_ah->led_pin = ATH_LED_PIN_DEF;
139
140 /* Configure gpio 1 for output */
141 ath9k_hw_cfg_output(sc->sc_ah, sc->sc_ah->led_pin,
142 AR_GPIO_OUTPUT_MUX_AS_OUTPUT);
143 /* LED off, active low */
144 ath9k_hw_set_gpio(sc->sc_ah, sc->sc_ah->led_pin, 1);
145
146 INIT_DELAYED_WORK(&sc->ath_led_blink_work, ath_led_blink_work);
147
148 trigger = ieee80211_get_radio_led_name(sc->hw);
149 snprintf(sc->radio_led.name, sizeof(sc->radio_led.name),
150 "ath9k-%s::radio", wiphy_name(sc->hw->wiphy));
151 ret = ath_register_led(sc, &sc->radio_led, trigger);
152 sc->radio_led.led_type = ATH_LED_RADIO;
153 if (ret)
154 goto fail;
155
156 trigger = ieee80211_get_assoc_led_name(sc->hw);
157 snprintf(sc->assoc_led.name, sizeof(sc->assoc_led.name),
158 "ath9k-%s::assoc", wiphy_name(sc->hw->wiphy));
159 ret = ath_register_led(sc, &sc->assoc_led, trigger);
160 sc->assoc_led.led_type = ATH_LED_ASSOC;
161 if (ret)
162 goto fail;
163
164 trigger = ieee80211_get_tx_led_name(sc->hw);
165 snprintf(sc->tx_led.name, sizeof(sc->tx_led.name),
166 "ath9k-%s::tx", wiphy_name(sc->hw->wiphy));
167 ret = ath_register_led(sc, &sc->tx_led, trigger);
168 sc->tx_led.led_type = ATH_LED_TX;
169 if (ret)
170 goto fail;
171
172 trigger = ieee80211_get_rx_led_name(sc->hw);
173 snprintf(sc->rx_led.name, sizeof(sc->rx_led.name),
174 "ath9k-%s::rx", wiphy_name(sc->hw->wiphy));
175 ret = ath_register_led(sc, &sc->rx_led, trigger);
176 sc->rx_led.led_type = ATH_LED_RX;
177 if (ret)
178 goto fail;
179
180 return;
181
182fail:
183 cancel_delayed_work_sync(&sc->ath_led_blink_work);
184 ath_deinit_leds(sc);
185}
186
187/*******************/
188/* Rfkill */
189/*******************/
190
191static bool ath_is_rfkill_set(struct ath_softc *sc)
192{
193 struct ath_hw *ah = sc->sc_ah;
194
195 return ath9k_hw_gpio_get(ah, ah->rfkill_gpio) ==
196 ah->rfkill_polarity;
197}
198
199void ath9k_rfkill_poll_state(struct ieee80211_hw *hw)
200{
201 struct ath_wiphy *aphy = hw->priv;
202 struct ath_softc *sc = aphy->sc;
203 bool blocked = !!ath_is_rfkill_set(sc);
204
205 wiphy_rfkill_set_hw_state(hw->wiphy, blocked);
206}
207
208void ath_start_rfkill_poll(struct ath_softc *sc)
209{
210 struct ath_hw *ah = sc->sc_ah;
211
212 if (ah->caps.hw_caps & ATH9K_HW_CAP_RFSILENT)
213 wiphy_rfkill_start_polling(sc->hw->wiphy);
214}
215
216/******************/
217/* BTCOEX */
218/******************/
219
220/*
221 * Detects if there is any priority bt traffic
222 */
223static void ath_detect_bt_priority(struct ath_softc *sc)
224{
225 struct ath_btcoex *btcoex = &sc->btcoex;
226 struct ath_hw *ah = sc->sc_ah;
227
228 if (ath9k_hw_gpio_get(sc->sc_ah, ah->btcoex_hw.btpriority_gpio))
229 btcoex->bt_priority_cnt++;
230
231 if (time_after(jiffies, btcoex->bt_priority_time +
232 msecs_to_jiffies(ATH_BT_PRIORITY_TIME_THRESHOLD))) {
233 if (btcoex->bt_priority_cnt >= ATH_BT_CNT_THRESHOLD) {
234 ath_print(ath9k_hw_common(sc->sc_ah), ATH_DBG_BTCOEX,
235 "BT priority traffic detected");
236 sc->sc_flags |= SC_OP_BT_PRIORITY_DETECTED;
237 } else {
238 sc->sc_flags &= ~SC_OP_BT_PRIORITY_DETECTED;
239 }
240
241 btcoex->bt_priority_cnt = 0;
242 btcoex->bt_priority_time = jiffies;
243 }
244}
245
246/*
247 * Configures appropriate weight based on stomp type.
248 */
249static void ath9k_btcoex_bt_stomp(struct ath_softc *sc,
250 enum ath_stomp_type stomp_type)
251{
252 struct ath_hw *ah = sc->sc_ah;
253
254 switch (stomp_type) {
255 case ATH_BTCOEX_STOMP_ALL:
256 ath9k_hw_btcoex_set_weight(ah, AR_BT_COEX_WGHT,
257 AR_STOMP_ALL_WLAN_WGHT);
258 break;
259 case ATH_BTCOEX_STOMP_LOW:
260 ath9k_hw_btcoex_set_weight(ah, AR_BT_COEX_WGHT,
261 AR_STOMP_LOW_WLAN_WGHT);
262 break;
263 case ATH_BTCOEX_STOMP_NONE:
264 ath9k_hw_btcoex_set_weight(ah, AR_BT_COEX_WGHT,
265 AR_STOMP_NONE_WLAN_WGHT);
266 break;
267 default:
268 ath_print(ath9k_hw_common(ah), ATH_DBG_BTCOEX,
269 "Invalid Stomptype\n");
270 break;
271 }
272
273 ath9k_hw_btcoex_enable(ah);
274}
275
276static void ath9k_gen_timer_start(struct ath_hw *ah,
277 struct ath_gen_timer *timer,
278 u32 timer_next,
279 u32 timer_period)
280{
281 struct ath_common *common = ath9k_hw_common(ah);
282 struct ath_softc *sc = (struct ath_softc *) common->priv;
283
284 ath9k_hw_gen_timer_start(ah, timer, timer_next, timer_period);
285
286 if ((sc->imask & ATH9K_INT_GENTIMER) == 0) {
287 ath9k_hw_set_interrupts(ah, 0);
288 sc->imask |= ATH9K_INT_GENTIMER;
289 ath9k_hw_set_interrupts(ah, sc->imask);
290 }
291}
292
293static void ath9k_gen_timer_stop(struct ath_hw *ah, struct ath_gen_timer *timer)
294{
295 struct ath_common *common = ath9k_hw_common(ah);
296 struct ath_softc *sc = (struct ath_softc *) common->priv;
297 struct ath_gen_timer_table *timer_table = &ah->hw_gen_timers;
298
299 ath9k_hw_gen_timer_stop(ah, timer);
300
301 /* if no timer is enabled, turn off interrupt mask */
302 if (timer_table->timer_mask.val == 0) {
303 ath9k_hw_set_interrupts(ah, 0);
304 sc->imask &= ~ATH9K_INT_GENTIMER;
305 ath9k_hw_set_interrupts(ah, sc->imask);
306 }
307}
308
309/*
310 * This is the master bt coex timer which runs every
311 * 45 ms; bt traffic is given priority during 55% of this
312 * period while wlan gets the remaining 45%.
313 */
314static void ath_btcoex_period_timer(unsigned long data)
315{
316 struct ath_softc *sc = (struct ath_softc *) data;
317 struct ath_hw *ah = sc->sc_ah;
318 struct ath_btcoex *btcoex = &sc->btcoex;
319
320 ath_detect_bt_priority(sc);
321
322 spin_lock_bh(&btcoex->btcoex_lock);
323
324 ath9k_btcoex_bt_stomp(sc, btcoex->bt_stomp_type);
325
326 spin_unlock_bh(&btcoex->btcoex_lock);
327
328 if (btcoex->btcoex_period != btcoex->btcoex_no_stomp) {
329 if (btcoex->hw_timer_enabled)
330 ath9k_gen_timer_stop(ah, btcoex->no_stomp_timer);
331
332 ath9k_gen_timer_start(ah,
333 btcoex->no_stomp_timer,
334 (ath9k_hw_gettsf32(ah) +
335 btcoex->btcoex_no_stomp),
336 btcoex->btcoex_no_stomp * 10);
337 btcoex->hw_timer_enabled = true;
338 }
339
340 mod_timer(&btcoex->period_timer, jiffies +
341 msecs_to_jiffies(ATH_BTCOEX_DEF_BT_PERIOD));
342}
343
344/*
345 * Generic tsf based hw timer which configures weight
346 * registers to time slice between wlan and bt traffic
347 */
348static void ath_btcoex_no_stomp_timer(void *arg)
349{
350 struct ath_softc *sc = (struct ath_softc *)arg;
351 struct ath_hw *ah = sc->sc_ah;
352 struct ath_btcoex *btcoex = &sc->btcoex;
353
354 ath_print(ath9k_hw_common(ah), ATH_DBG_BTCOEX,
355		  "no stomp timer running\n");
356
357 spin_lock_bh(&btcoex->btcoex_lock);
358
359 if (btcoex->bt_stomp_type == ATH_BTCOEX_STOMP_LOW)
360 ath9k_btcoex_bt_stomp(sc, ATH_BTCOEX_STOMP_NONE);
361 else if (btcoex->bt_stomp_type == ATH_BTCOEX_STOMP_ALL)
362 ath9k_btcoex_bt_stomp(sc, ATH_BTCOEX_STOMP_LOW);
363
364 spin_unlock_bh(&btcoex->btcoex_lock);
365}
366
367int ath_init_btcoex_timer(struct ath_softc *sc)
368{
369 struct ath_btcoex *btcoex = &sc->btcoex;
370
371 btcoex->btcoex_period = ATH_BTCOEX_DEF_BT_PERIOD * 1000;
372 btcoex->btcoex_no_stomp = (100 - ATH_BTCOEX_DEF_DUTY_CYCLE) *
373 btcoex->btcoex_period / 100;
374
375 setup_timer(&btcoex->period_timer, ath_btcoex_period_timer,
376 (unsigned long) sc);
377
378 spin_lock_init(&btcoex->btcoex_lock);
379
380 btcoex->no_stomp_timer = ath_gen_timer_alloc(sc->sc_ah,
381 ath_btcoex_no_stomp_timer,
382 ath_btcoex_no_stomp_timer,
383 (void *) sc, AR_FIRST_NDP_TIMER);
384
385 if (!btcoex->no_stomp_timer)
386 return -ENOMEM;
387
388 return 0;
389}
390
391/*
392 * (Re)start btcoex timers
393 */
394void ath9k_btcoex_timer_resume(struct ath_softc *sc)
395{
396 struct ath_btcoex *btcoex = &sc->btcoex;
397 struct ath_hw *ah = sc->sc_ah;
398
399 ath_print(ath9k_hw_common(ah), ATH_DBG_BTCOEX,
400 "Starting btcoex timers");
401
402 /* make sure duty cycle timer is also stopped when resuming */
403 if (btcoex->hw_timer_enabled)
404 ath9k_gen_timer_stop(sc->sc_ah, btcoex->no_stomp_timer);
405
406 btcoex->bt_priority_cnt = 0;
407 btcoex->bt_priority_time = jiffies;
408 sc->sc_flags &= ~SC_OP_BT_PRIORITY_DETECTED;
409
410 mod_timer(&btcoex->period_timer, jiffies);
411}
412
413
414/*
415 * Pause btcoex timer and bt duty cycle timer
416 */
417void ath9k_btcoex_timer_pause(struct ath_softc *sc)
418{
419 struct ath_btcoex *btcoex = &sc->btcoex;
420 struct ath_hw *ah = sc->sc_ah;
421
422 del_timer_sync(&btcoex->period_timer);
423
424 if (btcoex->hw_timer_enabled)
425 ath9k_gen_timer_stop(ah, btcoex->no_stomp_timer);
426
427 btcoex->hw_timer_enabled = false;
428}
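
To make the duty-cycle arithmetic in ath_init_btcoex_timer() above concrete: with the 45 ms period and the 55% BT share described in the code comments (i.e. assuming ATH_BTCOEX_DEF_BT_PERIOD == 45 and ATH_BTCOEX_DEF_DUTY_CYCLE == 55, values this listing does not show), the numbers work out as follows:

/* Worked example of the btcoex timer math, under the assumptions above. */
static void example_btcoex_math(void)
{
	u32 period_us   = 45 * 1000;                    /* 45000 us per period */
	u32 no_stomp_us = (100 - 55) * period_us / 100; /* 20250 us, the WLAN share */

	/* ath_btcoex_period_timer() then arms the no-stomp hw timer
	 * no_stomp_us microseconds into each period. */
	(void)period_us;
	(void)no_stomp_us;
}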
diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c
index ae371448b5a..1a27f39c1ad 100644
--- a/drivers/net/wireless/ath/ath9k/hw.c
+++ b/drivers/net/wireless/ath/ath9k/hw.c
@@ -52,28 +52,6 @@ module_exit(ath9k_exit);
52/* Helper Functions */ 52/* Helper Functions */
53/********************/ 53/********************/
54 54
55static u32 ath9k_hw_mac_usec(struct ath_hw *ah, u32 clks)
56{
57 struct ieee80211_conf *conf = &ath9k_hw_common(ah)->hw->conf;
58
59 if (!ah->curchan) /* should really check for CCK instead */
60 return clks / ATH9K_CLOCK_RATE_CCK;
61 if (conf->channel->band == IEEE80211_BAND_2GHZ)
62 return clks / ATH9K_CLOCK_RATE_2GHZ_OFDM;
63
64 return clks / ATH9K_CLOCK_RATE_5GHZ_OFDM;
65}
66
67static u32 ath9k_hw_mac_to_usec(struct ath_hw *ah, u32 clks)
68{
69 struct ieee80211_conf *conf = &ath9k_hw_common(ah)->hw->conf;
70
71 if (conf_is_ht40(conf))
72 return ath9k_hw_mac_usec(ah, clks) / 2;
73 else
74 return ath9k_hw_mac_usec(ah, clks);
75}
76
77static u32 ath9k_hw_mac_clks(struct ath_hw *ah, u32 usecs) 55static u32 ath9k_hw_mac_clks(struct ath_hw *ah, u32 usecs)
78{ 56{
79 struct ieee80211_conf *conf = &ath9k_hw_common(ah)->hw->conf; 57 struct ieee80211_conf *conf = &ath9k_hw_common(ah)->hw->conf;
@@ -343,30 +321,6 @@ static bool ath9k_hw_chip_test(struct ath_hw *ah)
343 return true; 321 return true;
344} 322}
345 323
346static const char *ath9k_hw_devname(u16 devid)
347{
348 switch (devid) {
349 case AR5416_DEVID_PCI:
350 return "Atheros 5416";
351 case AR5416_DEVID_PCIE:
352 return "Atheros 5418";
353 case AR9160_DEVID_PCI:
354 return "Atheros 9160";
355 case AR5416_AR9100_DEVID:
356 return "Atheros 9100";
357 case AR9280_DEVID_PCI:
358 case AR9280_DEVID_PCIE:
359 return "Atheros 9280";
360 case AR9285_DEVID_PCIE:
361 return "Atheros 9285";
362 case AR5416_DEVID_AR9287_PCI:
363 case AR5416_DEVID_AR9287_PCIE:
364 return "Atheros 9287";
365 }
366
367 return NULL;
368}
369
370static void ath9k_hw_init_config(struct ath_hw *ah) 324static void ath9k_hw_init_config(struct ath_hw *ah)
371{ 325{
372 int i; 326 int i;
@@ -392,7 +346,7 @@ static void ath9k_hw_init_config(struct ath_hw *ah)
392 ah->config.spurchans[i][1] = AR_NO_SPUR; 346 ah->config.spurchans[i][1] = AR_NO_SPUR;
393 } 347 }
394 348
395 ah->config.intr_mitigation = true; 349 ah->config.rx_intr_mitigation = true;
396 350
397 /* 351 /*
398 * We need this for PCI devices only (Cardbus, PCI, miniPCI) 352 * We need this for PCI devices only (Cardbus, PCI, miniPCI)
@@ -437,8 +391,6 @@ static void ath9k_hw_init_defaults(struct ath_hw *ah)
437 ah->beacon_interval = 100; 391 ah->beacon_interval = 100;
438 ah->enable_32kHz_clock = DONT_USE_32KHZ; 392 ah->enable_32kHz_clock = DONT_USE_32KHZ;
439 ah->slottime = (u32) -1; 393 ah->slottime = (u32) -1;
440 ah->acktimeout = (u32) -1;
441 ah->ctstimeout = (u32) -1;
442 ah->globaltxtimeout = (u32) -1; 394 ah->globaltxtimeout = (u32) -1;
443 ah->power_mode = ATH9K_PM_UNDEFINED; 395 ah->power_mode = ATH9K_PM_UNDEFINED;
444} 396}
@@ -1183,7 +1135,7 @@ static void ath9k_hw_init_interrupt_masks(struct ath_hw *ah,
1183 AR_IMR_RXORN | 1135 AR_IMR_RXORN |
1184 AR_IMR_BCNMISC; 1136 AR_IMR_BCNMISC;
1185 1137
1186 if (ah->config.intr_mitigation) 1138 if (ah->config.rx_intr_mitigation)
1187 ah->mask_reg |= AR_IMR_RXINTM | AR_IMR_RXMINTR; 1139 ah->mask_reg |= AR_IMR_RXINTM | AR_IMR_RXMINTR;
1188 else 1140 else
1189 ah->mask_reg |= AR_IMR_RXOK; 1141 ah->mask_reg |= AR_IMR_RXOK;
@@ -1203,34 +1155,25 @@ static void ath9k_hw_init_interrupt_masks(struct ath_hw *ah,
1203 } 1155 }
1204} 1156}
1205 1157
1206static bool ath9k_hw_set_ack_timeout(struct ath_hw *ah, u32 us) 1158static void ath9k_hw_setslottime(struct ath_hw *ah, u32 us)
1207{ 1159{
1208 if (us > ath9k_hw_mac_to_usec(ah, MS(0xffffffff, AR_TIME_OUT_ACK))) { 1160 u32 val = ath9k_hw_mac_to_clks(ah, us);
1209 ath_print(ath9k_hw_common(ah), ATH_DBG_RESET, 1161 val = min(val, (u32) 0xFFFF);
1210 "bad ack timeout %u\n", us); 1162 REG_WRITE(ah, AR_D_GBL_IFS_SLOT, val);
1211 ah->acktimeout = (u32) -1;
1212 return false;
1213 } else {
1214 REG_RMW_FIELD(ah, AR_TIME_OUT,
1215 AR_TIME_OUT_ACK, ath9k_hw_mac_to_clks(ah, us));
1216 ah->acktimeout = us;
1217 return true;
1218 }
1219} 1163}
1220 1164
1221static bool ath9k_hw_set_cts_timeout(struct ath_hw *ah, u32 us) 1165static void ath9k_hw_set_ack_timeout(struct ath_hw *ah, u32 us)
1222{ 1166{
1223 if (us > ath9k_hw_mac_to_usec(ah, MS(0xffffffff, AR_TIME_OUT_CTS))) { 1167 u32 val = ath9k_hw_mac_to_clks(ah, us);
1224 ath_print(ath9k_hw_common(ah), ATH_DBG_RESET, 1168 val = min(val, (u32) MS(0xFFFFFFFF, AR_TIME_OUT_ACK));
1225 "bad cts timeout %u\n", us); 1169 REG_RMW_FIELD(ah, AR_TIME_OUT, AR_TIME_OUT_ACK, val);
1226 ah->ctstimeout = (u32) -1; 1170}
1227 return false; 1171
1228 } else { 1172static void ath9k_hw_set_cts_timeout(struct ath_hw *ah, u32 us)
1229 REG_RMW_FIELD(ah, AR_TIME_OUT, 1173{
1230 AR_TIME_OUT_CTS, ath9k_hw_mac_to_clks(ah, us)); 1174 u32 val = ath9k_hw_mac_to_clks(ah, us);
1231 ah->ctstimeout = us; 1175 val = min(val, (u32) MS(0xFFFFFFFF, AR_TIME_OUT_CTS));
1232 return true; 1176 REG_RMW_FIELD(ah, AR_TIME_OUT, AR_TIME_OUT_CTS, val);
1233 }
1234} 1177}
1235 1178
1236static bool ath9k_hw_set_global_txtimeout(struct ath_hw *ah, u32 tu) 1179static bool ath9k_hw_set_global_txtimeout(struct ath_hw *ah, u32 tu)
@@ -1247,31 +1190,37 @@ static bool ath9k_hw_set_global_txtimeout(struct ath_hw *ah, u32 tu)
1247 } 1190 }
1248} 1191}
1249 1192
1250static void ath9k_hw_init_user_settings(struct ath_hw *ah) 1193void ath9k_hw_init_global_settings(struct ath_hw *ah)
1251{ 1194{
1195 struct ieee80211_conf *conf = &ath9k_hw_common(ah)->hw->conf;
1196 int acktimeout;
1197 int slottime;
1198 int sifstime;
1199
1252 ath_print(ath9k_hw_common(ah), ATH_DBG_RESET, "ah->misc_mode 0x%x\n", 1200 ath_print(ath9k_hw_common(ah), ATH_DBG_RESET, "ah->misc_mode 0x%x\n",
1253 ah->misc_mode); 1201 ah->misc_mode);
1254 1202
1255 if (ah->misc_mode != 0) 1203 if (ah->misc_mode != 0)
1256 REG_WRITE(ah, AR_PCU_MISC, 1204 REG_WRITE(ah, AR_PCU_MISC,
1257 REG_READ(ah, AR_PCU_MISC) | ah->misc_mode); 1205 REG_READ(ah, AR_PCU_MISC) | ah->misc_mode);
1258 if (ah->slottime != (u32) -1) 1206
1259 ath9k_hw_setslottime(ah, ah->slottime); 1207 if (conf->channel && conf->channel->band == IEEE80211_BAND_5GHZ)
1260 if (ah->acktimeout != (u32) -1) 1208 sifstime = 16;
1261 ath9k_hw_set_ack_timeout(ah, ah->acktimeout); 1209 else
1262 if (ah->ctstimeout != (u32) -1) 1210 sifstime = 10;
1263 ath9k_hw_set_cts_timeout(ah, ah->ctstimeout); 1211
1212 /* As defined by IEEE 802.11-2007 17.3.8.6 */
1213 slottime = ah->slottime + 3 * ah->coverage_class;
1214 acktimeout = slottime + sifstime;
1215 ath9k_hw_setslottime(ah, slottime);
1216 ath9k_hw_set_ack_timeout(ah, acktimeout);
1217 ath9k_hw_set_cts_timeout(ah, acktimeout);
1264 if (ah->globaltxtimeout != (u32) -1) 1218 if (ah->globaltxtimeout != (u32) -1)
1265 ath9k_hw_set_global_txtimeout(ah, ah->globaltxtimeout); 1219 ath9k_hw_set_global_txtimeout(ah, ah->globaltxtimeout);
1266} 1220}
1221EXPORT_SYMBOL(ath9k_hw_init_global_settings);
1267 1222
1268const char *ath9k_hw_probe(u16 vendorid, u16 devid) 1223void ath9k_hw_deinit(struct ath_hw *ah)
1269{
1270 return vendorid == ATHEROS_VENDOR_ID ?
1271 ath9k_hw_devname(devid) : NULL;
1272}
1273
1274void ath9k_hw_detach(struct ath_hw *ah)
1275{ 1224{
1276 struct ath_common *common = ath9k_hw_common(ah); 1225 struct ath_common *common = ath9k_hw_common(ah);
1277 1226
@@ -1289,7 +1238,7 @@ free_hw:
1289 kfree(ah); 1238 kfree(ah);
1290 ah = NULL; 1239 ah = NULL;
1291} 1240}
1292EXPORT_SYMBOL(ath9k_hw_detach); 1241EXPORT_SYMBOL(ath9k_hw_deinit);
1293 1242
1294/*******/ 1243/*******/
1295/* INI */ 1244/* INI */
@@ -2090,7 +2039,7 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
2090 if (ah->caps.hw_caps & ATH9K_HW_CAP_RFSILENT) 2039 if (ah->caps.hw_caps & ATH9K_HW_CAP_RFSILENT)
2091 ath9k_enable_rfkill(ah); 2040 ath9k_enable_rfkill(ah);
2092 2041
2093 ath9k_hw_init_user_settings(ah); 2042 ath9k_hw_init_global_settings(ah);
2094 2043
2095 if (AR_SREV_9287_12_OR_LATER(ah)) { 2044 if (AR_SREV_9287_12_OR_LATER(ah)) {
2096 REG_WRITE(ah, AR_D_GBL_IFS_SIFS, 2045 REG_WRITE(ah, AR_D_GBL_IFS_SIFS,
@@ -2120,7 +2069,7 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
2120 2069
2121 REG_WRITE(ah, AR_OBS, 8); 2070 REG_WRITE(ah, AR_OBS, 8);
2122 2071
2123 if (ah->config.intr_mitigation) { 2072 if (ah->config.rx_intr_mitigation) {
2124 REG_RMW_FIELD(ah, AR_RIMT, AR_RIMT_LAST, 500); 2073 REG_RMW_FIELD(ah, AR_RIMT, AR_RIMT_LAST, 500);
2125 REG_RMW_FIELD(ah, AR_RIMT, AR_RIMT_FIRST, 2000); 2074 REG_RMW_FIELD(ah, AR_RIMT, AR_RIMT_FIRST, 2000);
2126 } 2075 }
@@ -2780,7 +2729,7 @@ bool ath9k_hw_getisr(struct ath_hw *ah, enum ath9k_int *masked)
2780 2729
2781 *masked = isr & ATH9K_INT_COMMON; 2730 *masked = isr & ATH9K_INT_COMMON;
2782 2731
2783 if (ah->config.intr_mitigation) { 2732 if (ah->config.rx_intr_mitigation) {
2784 if (isr & (AR_ISR_RXMINTR | AR_ISR_RXINTM)) 2733 if (isr & (AR_ISR_RXMINTR | AR_ISR_RXINTM))
2785 *masked |= ATH9K_INT_RX; 2734 *masked |= ATH9K_INT_RX;
2786 } 2735 }
@@ -2913,7 +2862,7 @@ enum ath9k_int ath9k_hw_set_interrupts(struct ath_hw *ah, enum ath9k_int ints)
2913 } 2862 }
2914 if (ints & ATH9K_INT_RX) { 2863 if (ints & ATH9K_INT_RX) {
2915 mask |= AR_IMR_RXERR; 2864 mask |= AR_IMR_RXERR;
2916 if (ah->config.intr_mitigation) 2865 if (ah->config.rx_intr_mitigation)
2917 mask |= AR_IMR_RXMINTR | AR_IMR_RXINTM; 2866 mask |= AR_IMR_RXMINTR | AR_IMR_RXINTM;
2918 else 2867 else
2919 mask |= AR_IMR_RXOK | AR_IMR_RXDESC; 2868 mask |= AR_IMR_RXOK | AR_IMR_RXDESC;
@@ -3687,21 +3636,6 @@ u64 ath9k_hw_extend_tsf(struct ath_hw *ah, u32 rstamp)
3687} 3636}
3688EXPORT_SYMBOL(ath9k_hw_extend_tsf); 3637EXPORT_SYMBOL(ath9k_hw_extend_tsf);
3689 3638
3690bool ath9k_hw_setslottime(struct ath_hw *ah, u32 us)
3691{
3692 if (us < ATH9K_SLOT_TIME_9 || us > ath9k_hw_mac_to_usec(ah, 0xffff)) {
3693 ath_print(ath9k_hw_common(ah), ATH_DBG_RESET,
3694 "bad slot time %u\n", us);
3695 ah->slottime = (u32) -1;
3696 return false;
3697 } else {
3698 REG_WRITE(ah, AR_D_GBL_IFS_SLOT, ath9k_hw_mac_to_clks(ah, us));
3699 ah->slottime = us;
3700 return true;
3701 }
3702}
3703EXPORT_SYMBOL(ath9k_hw_setslottime);
3704
3705void ath9k_hw_set11nmac2040(struct ath_hw *ah) 3639void ath9k_hw_set11nmac2040(struct ath_hw *ah)
3706{ 3640{
3707 struct ieee80211_conf *conf = &ath9k_hw_common(ah)->hw->conf; 3641 struct ieee80211_conf *conf = &ath9k_hw_common(ah)->hw->conf;
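
The replacement ath9k_hw_init_global_settings() above derives the ACK and CTS timeouts from the slot time, the coverage class, and a band-dependent SIFS. A worked example, assuming a 9 us slot time, coverage class 0, and a 5 GHz channel:

/* Worked example of the timing math in ath9k_hw_init_global_settings(). */
static void example_global_timing(void)
{
	int slottime   = 9 + 3 * 0;           /* 9 us */
	int sifstime   = 16;                  /* 16 us on 5 GHz, 10 us on 2.4 GHz */
	int acktimeout = slottime + sifstime; /* 25 us, written for both ACK and CTS */

	(void)acktimeout;
}

Each increment of the coverage class therefore adds 3 us to the slot time and to both timeouts, per the IEEE 802.11-2007 17.3.8.6 reference in the hunk.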
diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h
index e2b0c73a616..ab1f1981d85 100644
--- a/drivers/net/wireless/ath/ath9k/hw.h
+++ b/drivers/net/wireless/ath/ath9k/hw.h
@@ -212,7 +212,7 @@ struct ath9k_ops_config {
212 u32 cck_trig_low; 212 u32 cck_trig_low;
213 u32 enable_ani; 213 u32 enable_ani;
214 int serialize_regmode; 214 int serialize_regmode;
215 bool intr_mitigation; 215 bool rx_intr_mitigation;
216#define SPUR_DISABLE 0 216#define SPUR_DISABLE 0
217#define SPUR_ENABLE_IOCTL 1 217#define SPUR_ENABLE_IOCTL 1
218#define SPUR_ENABLE_EEPROM 2 218#define SPUR_ENABLE_EEPROM 2
@@ -551,10 +551,9 @@ struct ath_hw {
551 u32 *bank6Temp; 551 u32 *bank6Temp;
552 552
553 int16_t txpower_indexoffset; 553 int16_t txpower_indexoffset;
554 int coverage_class;
554 u32 beacon_interval; 555 u32 beacon_interval;
555 u32 slottime; 556 u32 slottime;
556 u32 acktimeout;
557 u32 ctstimeout;
558 u32 globaltxtimeout; 557 u32 globaltxtimeout;
559 558
560 /* ANI */ 559 /* ANI */
@@ -616,7 +615,7 @@ static inline struct ath_regulatory *ath9k_hw_regulatory(struct ath_hw *ah)
616 615
617/* Initialization, Detach, Reset */ 616/* Initialization, Detach, Reset */
618const char *ath9k_hw_probe(u16 vendorid, u16 devid); 617const char *ath9k_hw_probe(u16 vendorid, u16 devid);
619void ath9k_hw_detach(struct ath_hw *ah); 618void ath9k_hw_deinit(struct ath_hw *ah);
620int ath9k_hw_init(struct ath_hw *ah); 619int ath9k_hw_init(struct ath_hw *ah);
621int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan, 620int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
622 bool bChannelChange); 621 bool bChannelChange);
@@ -668,7 +667,7 @@ void ath9k_hw_settsf64(struct ath_hw *ah, u64 tsf64);
668void ath9k_hw_reset_tsf(struct ath_hw *ah); 667void ath9k_hw_reset_tsf(struct ath_hw *ah);
669void ath9k_hw_set_tsfadjust(struct ath_hw *ah, u32 setting); 668void ath9k_hw_set_tsfadjust(struct ath_hw *ah, u32 setting);
670u64 ath9k_hw_extend_tsf(struct ath_hw *ah, u32 rstamp); 669u64 ath9k_hw_extend_tsf(struct ath_hw *ah, u32 rstamp);
671bool ath9k_hw_setslottime(struct ath_hw *ah, u32 us); 670void ath9k_hw_init_global_settings(struct ath_hw *ah);
672void ath9k_hw_set11nmac2040(struct ath_hw *ah); 671void ath9k_hw_set11nmac2040(struct ath_hw *ah);
673void ath9k_hw_beaconinit(struct ath_hw *ah, u32 next_beacon, u32 beacon_period); 672void ath9k_hw_beaconinit(struct ath_hw *ah, u32 next_beacon, u32 beacon_period);
674void ath9k_hw_set_sta_beacon_timers(struct ath_hw *ah, 673void ath9k_hw_set_sta_beacon_timers(struct ath_hw *ah,
diff --git a/drivers/net/wireless/ath/ath9k/init.c b/drivers/net/wireless/ath/ath9k/init.c
new file mode 100644
index 00000000000..5f78d7a5ff2
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/init.c
@@ -0,0 +1,861 @@
1/*
2 * Copyright (c) 2008-2009 Atheros Communications Inc.
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
17#include "ath9k.h"
18
19static char *dev_info = "ath9k";
20
21MODULE_AUTHOR("Atheros Communications");
22MODULE_DESCRIPTION("Support for Atheros 802.11n wireless LAN cards.");
23MODULE_SUPPORTED_DEVICE("Atheros 802.11n WLAN cards");
24MODULE_LICENSE("Dual BSD/GPL");
25
26static unsigned int ath9k_debug = ATH_DBG_DEFAULT;
27module_param_named(debug, ath9k_debug, uint, 0);
28MODULE_PARM_DESC(debug, "Debugging mask");
29
30int modparam_nohwcrypt;
31module_param_named(nohwcrypt, modparam_nohwcrypt, int, 0444);
32MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption");
33
34/* We use the hw_value as an index into our private channel structure */
35
36#define CHAN2G(_freq, _idx) { \
37 .center_freq = (_freq), \
38 .hw_value = (_idx), \
39 .max_power = 20, \
40}
41
42#define CHAN5G(_freq, _idx) { \
43 .band = IEEE80211_BAND_5GHZ, \
44 .center_freq = (_freq), \
45 .hw_value = (_idx), \
46 .max_power = 20, \
47}
48
49/* Some 2 GHz radios are actually tunable on 2312-2732 MHz
50 * in 5 MHz steps; we support only the channels for which we
51 * know we have calibration data on all cards, to keep this
52 * table static */
53static struct ieee80211_channel ath9k_2ghz_chantable[] = {
54 CHAN2G(2412, 0), /* Channel 1 */
55 CHAN2G(2417, 1), /* Channel 2 */
56 CHAN2G(2422, 2), /* Channel 3 */
57 CHAN2G(2427, 3), /* Channel 4 */
58 CHAN2G(2432, 4), /* Channel 5 */
59 CHAN2G(2437, 5), /* Channel 6 */
60 CHAN2G(2442, 6), /* Channel 7 */
61 CHAN2G(2447, 7), /* Channel 8 */
62 CHAN2G(2452, 8), /* Channel 9 */
63 CHAN2G(2457, 9), /* Channel 10 */
64 CHAN2G(2462, 10), /* Channel 11 */
65 CHAN2G(2467, 11), /* Channel 12 */
66 CHAN2G(2472, 12), /* Channel 13 */
67 CHAN2G(2484, 13), /* Channel 14 */
68};
69
70/* Some 5 GHz radios are actually tunable on XXXX-YYYY
71 * in 5 MHz steps; we support only the channels for which we
72 * know we have calibration data on all cards, to keep this
73 * table static */
74static struct ieee80211_channel ath9k_5ghz_chantable[] = {
75 /* _We_ call this UNII 1 */
76 CHAN5G(5180, 14), /* Channel 36 */
77 CHAN5G(5200, 15), /* Channel 40 */
78 CHAN5G(5220, 16), /* Channel 44 */
79 CHAN5G(5240, 17), /* Channel 48 */
80 /* _We_ call this UNII 2 */
81 CHAN5G(5260, 18), /* Channel 52 */
82 CHAN5G(5280, 19), /* Channel 56 */
83 CHAN5G(5300, 20), /* Channel 60 */
84 CHAN5G(5320, 21), /* Channel 64 */
85 /* _We_ call this "Middle band" */
86 CHAN5G(5500, 22), /* Channel 100 */
87 CHAN5G(5520, 23), /* Channel 104 */
88 CHAN5G(5540, 24), /* Channel 108 */
89 CHAN5G(5560, 25), /* Channel 112 */
90 CHAN5G(5580, 26), /* Channel 116 */
91 CHAN5G(5600, 27), /* Channel 120 */
92 CHAN5G(5620, 28), /* Channel 124 */
93 CHAN5G(5640, 29), /* Channel 128 */
94 CHAN5G(5660, 30), /* Channel 132 */
95 CHAN5G(5680, 31), /* Channel 136 */
96 CHAN5G(5700, 32), /* Channel 140 */
97 /* _We_ call this UNII 3 */
98 CHAN5G(5745, 33), /* Channel 149 */
99 CHAN5G(5765, 34), /* Channel 153 */
100 CHAN5G(5785, 35), /* Channel 157 */
101 CHAN5G(5805, 36), /* Channel 161 */
102 CHAN5G(5825, 37), /* Channel 165 */
103};
104
105/* Atheros hardware rate code addition for short preamble */
106#define SHPCHECK(__hw_rate, __flags) \
107 ((__flags & IEEE80211_RATE_SHORT_PREAMBLE) ? (__hw_rate | 0x04 ) : 0)
108
109#define RATE(_bitrate, _hw_rate, _flags) { \
110 .bitrate = (_bitrate), \
111 .flags = (_flags), \
112 .hw_value = (_hw_rate), \
113 .hw_value_short = (SHPCHECK(_hw_rate, _flags)) \
114}
115
116static struct ieee80211_rate ath9k_legacy_rates[] = {
117 RATE(10, 0x1b, 0),
118 RATE(20, 0x1a, IEEE80211_RATE_SHORT_PREAMBLE),
119 RATE(55, 0x19, IEEE80211_RATE_SHORT_PREAMBLE),
120 RATE(110, 0x18, IEEE80211_RATE_SHORT_PREAMBLE),
121 RATE(60, 0x0b, 0),
122 RATE(90, 0x0f, 0),
123 RATE(120, 0x0a, 0),
124 RATE(180, 0x0e, 0),
125 RATE(240, 0x09, 0),
126 RATE(360, 0x0d, 0),
127 RATE(480, 0x08, 0),
128 RATE(540, 0x0c, 0),
129};
130
131static void ath9k_deinit_softc(struct ath_softc *sc);
132
133/*
134 * Reads and writes share the same lock. We do this to serialize
135 * reads and writes on Atheros 802.11n PCI devices only. This is required
136 * as the FIFO on these devices can only sanely accept 2 requests.
137 */
138
139static void ath9k_iowrite32(void *hw_priv, u32 val, u32 reg_offset)
140{
141 struct ath_hw *ah = (struct ath_hw *) hw_priv;
142 struct ath_common *common = ath9k_hw_common(ah);
143 struct ath_softc *sc = (struct ath_softc *) common->priv;
144
145 if (ah->config.serialize_regmode == SER_REG_MODE_ON) {
146 unsigned long flags;
147 spin_lock_irqsave(&sc->sc_serial_rw, flags);
148 iowrite32(val, sc->mem + reg_offset);
149 spin_unlock_irqrestore(&sc->sc_serial_rw, flags);
150 } else
151 iowrite32(val, sc->mem + reg_offset);
152}
153
154static unsigned int ath9k_ioread32(void *hw_priv, u32 reg_offset)
155{
156 struct ath_hw *ah = (struct ath_hw *) hw_priv;
157 struct ath_common *common = ath9k_hw_common(ah);
158 struct ath_softc *sc = (struct ath_softc *) common->priv;
159 u32 val;
160
161 if (ah->config.serialize_regmode == SER_REG_MODE_ON) {
162 unsigned long flags;
163 spin_lock_irqsave(&sc->sc_serial_rw, flags);
164 val = ioread32(sc->mem + reg_offset);
165 spin_unlock_irqrestore(&sc->sc_serial_rw, flags);
166 } else
167 val = ioread32(sc->mem + reg_offset);
168 return val;
169}
170
171static const struct ath_ops ath9k_common_ops = {
172 .read = ath9k_ioread32,
173 .write = ath9k_iowrite32,
174};
175
176/**************************/
177/* Initialization */
178/**************************/
179
180static void setup_ht_cap(struct ath_softc *sc,
181 struct ieee80211_sta_ht_cap *ht_info)
182{
183 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
184 u8 tx_streams, rx_streams;
185
186 ht_info->ht_supported = true;
187 ht_info->cap = IEEE80211_HT_CAP_SUP_WIDTH_20_40 |
188 IEEE80211_HT_CAP_SM_PS |
189 IEEE80211_HT_CAP_SGI_40 |
190 IEEE80211_HT_CAP_DSSSCCK40;
191
192 ht_info->ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K;
193 ht_info->ampdu_density = IEEE80211_HT_MPDU_DENSITY_8;
194
195 /* set up supported mcs set */
196 memset(&ht_info->mcs, 0, sizeof(ht_info->mcs));
197 tx_streams = !(common->tx_chainmask & (common->tx_chainmask - 1)) ?
198 1 : 2;
199 rx_streams = !(common->rx_chainmask & (common->rx_chainmask - 1)) ?
200 1 : 2;
201
202 if (tx_streams != rx_streams) {
203 ath_print(common, ATH_DBG_CONFIG,
204 "TX streams %d, RX streams: %d\n",
205 tx_streams, rx_streams);
206 ht_info->mcs.tx_params |= IEEE80211_HT_MCS_TX_RX_DIFF;
207 ht_info->mcs.tx_params |= ((tx_streams - 1) <<
208 IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT);
209 }
210
211 ht_info->mcs.rx_mask[0] = 0xff;
212 if (rx_streams >= 2)
213 ht_info->mcs.rx_mask[1] = 0xff;
214
215 ht_info->mcs.tx_params |= IEEE80211_HT_MCS_TX_DEFINED;
216}
217
218static int ath9k_reg_notifier(struct wiphy *wiphy,
219 struct regulatory_request *request)
220{
221 struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
222 struct ath_wiphy *aphy = hw->priv;
223 struct ath_softc *sc = aphy->sc;
224 struct ath_regulatory *reg = ath9k_hw_regulatory(sc->sc_ah);
225
226 return ath_reg_notifier_apply(wiphy, request, reg);
227}
228
229/*
230 * This function allocates both the DMA descriptor structure and the
231 * buffers it contains. These hold the descriptors used
232 * by the system.
233*/
234int ath_descdma_setup(struct ath_softc *sc, struct ath_descdma *dd,
235 struct list_head *head, const char *name,
236 int nbuf, int ndesc)
237{
238#define DS2PHYS(_dd, _ds) \
239 ((_dd)->dd_desc_paddr + ((caddr_t)(_ds) - (caddr_t)(_dd)->dd_desc))
240#define ATH_DESC_4KB_BOUND_CHECK(_daddr) ((((_daddr) & 0xFFF) > 0xF7F) ? 1 : 0)
241#define ATH_DESC_4KB_BOUND_NUM_SKIPPED(_len) ((_len) / 4096)
242 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
243 struct ath_desc *ds;
244 struct ath_buf *bf;
245 int i, bsize, error;
246
247 ath_print(common, ATH_DBG_CONFIG, "%s DMA: %u buffers %u desc/buf\n",
248 name, nbuf, ndesc);
249
250 INIT_LIST_HEAD(head);
251 /* ath_desc must be a multiple of DWORDs */
252 if ((sizeof(struct ath_desc) % 4) != 0) {
253 ath_print(common, ATH_DBG_FATAL,
254 "ath_desc not DWORD aligned\n");
255 BUG_ON((sizeof(struct ath_desc) % 4) != 0);
256 error = -ENOMEM;
257 goto fail;
258 }
259
260 dd->dd_desc_len = sizeof(struct ath_desc) * nbuf * ndesc;
261
262 /*
263 * Need additional DMA memory because we can't use
264 * descriptors that cross the 4K page boundary. Assume
265 * one skipped descriptor per 4K page.
266 */
267 if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_4KB_SPLITTRANS)) {
268 u32 ndesc_skipped =
269 ATH_DESC_4KB_BOUND_NUM_SKIPPED(dd->dd_desc_len);
270 u32 dma_len;
271
272 while (ndesc_skipped) {
273 dma_len = ndesc_skipped * sizeof(struct ath_desc);
274 dd->dd_desc_len += dma_len;
275
276 ndesc_skipped = ATH_DESC_4KB_BOUND_NUM_SKIPPED(dma_len);
277 };
278 }
279
280 /* allocate descriptors */
281 dd->dd_desc = dma_alloc_coherent(sc->dev, dd->dd_desc_len,
282 &dd->dd_desc_paddr, GFP_KERNEL);
283 if (dd->dd_desc == NULL) {
284 error = -ENOMEM;
285 goto fail;
286 }
287 ds = dd->dd_desc;
288 ath_print(common, ATH_DBG_CONFIG, "%s DMA map: %p (%u) -> %llx (%u)\n",
289 name, ds, (u32) dd->dd_desc_len,
290 ito64(dd->dd_desc_paddr), /*XXX*/(u32) dd->dd_desc_len);
291
292 /* allocate buffers */
293 bsize = sizeof(struct ath_buf) * nbuf;
294 bf = kzalloc(bsize, GFP_KERNEL);
295 if (bf == NULL) {
296 error = -ENOMEM;
297 goto fail2;
298 }
299 dd->dd_bufptr = bf;
300
301 for (i = 0; i < nbuf; i++, bf++, ds += ndesc) {
302 bf->bf_desc = ds;
303 bf->bf_daddr = DS2PHYS(dd, ds);
304
305 if (!(sc->sc_ah->caps.hw_caps &
306 ATH9K_HW_CAP_4KB_SPLITTRANS)) {
307 /*
308 * Skip descriptor addresses which can cause 4KB
309 * boundary crossing (addr + length) with a 32 dword
310 * descriptor fetch.
311 */
312 while (ATH_DESC_4KB_BOUND_CHECK(bf->bf_daddr)) {
313 BUG_ON((caddr_t) bf->bf_desc >=
314 ((caddr_t) dd->dd_desc +
315 dd->dd_desc_len));
316
317 ds += ndesc;
318 bf->bf_desc = ds;
319 bf->bf_daddr = DS2PHYS(dd, ds);
320 }
321 }
322 list_add_tail(&bf->list, head);
323 }
324 return 0;
325fail2:
326 dma_free_coherent(sc->dev, dd->dd_desc_len, dd->dd_desc,
327 dd->dd_desc_paddr);
328fail:
329 memset(dd, 0, sizeof(*dd));
330 return error;
331#undef ATH_DESC_4KB_BOUND_CHECK
332#undef ATH_DESC_4KB_BOUND_NUM_SKIPPED
333#undef DS2PHYS
334}
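
As an aside on the 4 KB rule enforced in ath_descdma_setup() above: with a 32-dword (128 byte) descriptor fetch, any descriptor whose physical address has its low 12 bits above 0xF7F (4096 - 128 - 1) is treated as potentially reaching into the next 4 KB page and is skipped. A tiny illustration of that check (example_crosses_4kb is a hypothetical name; the real code uses the ATH_DESC_4KB_BOUND_CHECK macro shown above):

static int example_crosses_4kb(dma_addr_t daddr)
{
	/* e.g. 0x1F80 -> 1 (0xF80 > 0xF7F), 0x1F00 -> 0 (0xF00 <= 0xF7F) */
	return (daddr & 0xFFF) > 0xF7F;
}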
335
336static void ath9k_init_crypto(struct ath_softc *sc)
337{
338 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
339 int i = 0;
340
341 /* Get the hardware key cache size. */
342 common->keymax = sc->sc_ah->caps.keycache_size;
343 if (common->keymax > ATH_KEYMAX) {
344 ath_print(common, ATH_DBG_ANY,
345 "Warning, using only %u entries in %u key cache\n",
346 ATH_KEYMAX, common->keymax);
347 common->keymax = ATH_KEYMAX;
348 }
349
350 /*
351 * Reset the key cache since some parts do not
352 * reset the contents on initial power up.
353 */
354 for (i = 0; i < common->keymax; i++)
355 ath9k_hw_keyreset(sc->sc_ah, (u16) i);
356
357 if (ath9k_hw_getcapability(sc->sc_ah, ATH9K_CAP_CIPHER,
358 ATH9K_CIPHER_TKIP, NULL)) {
359 /*
360 * Whether we should enable h/w TKIP MIC.
361 * XXX: if we don't support WME TKIP MIC, then we wouldn't
362 * report WMM capable, so it's always safe to turn on
363 * TKIP MIC in this case.
364 */
365 ath9k_hw_setcapability(sc->sc_ah, ATH9K_CAP_TKIP_MIC, 0, 1, NULL);
366 }
367
368 /*
369 * Check whether the separate key cache entries
370 * are required to handle both tx+rx MIC keys.
371 * With split mic keys the number of stations is limited
372 * to 27 otherwise 59.
373 */
374 if (ath9k_hw_getcapability(sc->sc_ah, ATH9K_CAP_CIPHER,
375 ATH9K_CIPHER_TKIP, NULL)
376 && ath9k_hw_getcapability(sc->sc_ah, ATH9K_CAP_CIPHER,
377 ATH9K_CIPHER_MIC, NULL)
378 && ath9k_hw_getcapability(sc->sc_ah, ATH9K_CAP_TKIP_SPLIT,
379 0, NULL))
380 common->splitmic = 1;
381
382 /* turn on mcast key search if possible */
383 if (!ath9k_hw_getcapability(sc->sc_ah, ATH9K_CAP_MCAST_KEYSRCH, 0, NULL))
384 (void)ath9k_hw_setcapability(sc->sc_ah, ATH9K_CAP_MCAST_KEYSRCH,
385 1, 1, NULL);
386
387}
388
389static int ath9k_init_btcoex(struct ath_softc *sc)
390{
391 int r, qnum;
392
393 switch (sc->sc_ah->btcoex_hw.scheme) {
394 case ATH_BTCOEX_CFG_NONE:
395 break;
396 case ATH_BTCOEX_CFG_2WIRE:
397 ath9k_hw_btcoex_init_2wire(sc->sc_ah);
398 break;
399 case ATH_BTCOEX_CFG_3WIRE:
400 ath9k_hw_btcoex_init_3wire(sc->sc_ah);
401 r = ath_init_btcoex_timer(sc);
402 if (r)
403 return -1;
404 qnum = ath_tx_get_qnum(sc, ATH9K_TX_QUEUE_DATA, ATH9K_WME_AC_BE);
405 ath9k_hw_init_btcoex_hw(sc->sc_ah, qnum);
406 sc->btcoex.bt_stomp_type = ATH_BTCOEX_STOMP_LOW;
407 break;
408 default:
409 WARN_ON(1);
410 break;
411 }
412
413 return 0;
414}
415
416static int ath9k_init_queues(struct ath_softc *sc)
417{
418 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
419 int i = 0;
420
421 for (i = 0; i < ARRAY_SIZE(sc->tx.hwq_map); i++)
422 sc->tx.hwq_map[i] = -1;
423
424 sc->beacon.beaconq = ath9k_hw_beaconq_setup(sc->sc_ah);
425 if (sc->beacon.beaconq == -1) {
426 ath_print(common, ATH_DBG_FATAL,
427 "Unable to setup a beacon xmit queue\n");
428 goto err;
429 }
430
431 sc->beacon.cabq = ath_txq_setup(sc, ATH9K_TX_QUEUE_CAB, 0);
432 if (sc->beacon.cabq == NULL) {
433 ath_print(common, ATH_DBG_FATAL,
434 "Unable to setup CAB xmit queue\n");
435 goto err;
436 }
437
438 sc->config.cabqReadytime = ATH_CABQ_READY_TIME;
439 ath_cabq_update(sc);
440
441 if (!ath_tx_setup(sc, ATH9K_WME_AC_BK)) {
442 ath_print(common, ATH_DBG_FATAL,
443 "Unable to setup xmit queue for BK traffic\n");
444 goto err;
445 }
446
447 if (!ath_tx_setup(sc, ATH9K_WME_AC_BE)) {
448 ath_print(common, ATH_DBG_FATAL,
449 "Unable to setup xmit queue for BE traffic\n");
450 goto err;
451 }
452 if (!ath_tx_setup(sc, ATH9K_WME_AC_VI)) {
453 ath_print(common, ATH_DBG_FATAL,
454 "Unable to setup xmit queue for VI traffic\n");
455 goto err;
456 }
457 if (!ath_tx_setup(sc, ATH9K_WME_AC_VO)) {
458 ath_print(common, ATH_DBG_FATAL,
459 "Unable to setup xmit queue for VO traffic\n");
460 goto err;
461 }
462
463 return 0;
464
465err:
466 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
467 if (ATH_TXQ_SETUP(sc, i))
468 ath_tx_cleanupq(sc, &sc->tx.txq[i]);
469
470 return -EIO;
471}
472
473static void ath9k_init_channels_rates(struct ath_softc *sc)
474{
475 if (test_bit(ATH9K_MODE_11G, sc->sc_ah->caps.wireless_modes)) {
476 sc->sbands[IEEE80211_BAND_2GHZ].channels = ath9k_2ghz_chantable;
477 sc->sbands[IEEE80211_BAND_2GHZ].band = IEEE80211_BAND_2GHZ;
478 sc->sbands[IEEE80211_BAND_2GHZ].n_channels =
479 ARRAY_SIZE(ath9k_2ghz_chantable);
480 sc->sbands[IEEE80211_BAND_2GHZ].bitrates = ath9k_legacy_rates;
481 sc->sbands[IEEE80211_BAND_2GHZ].n_bitrates =
482 ARRAY_SIZE(ath9k_legacy_rates);
483 }
484
485 if (test_bit(ATH9K_MODE_11A, sc->sc_ah->caps.wireless_modes)) {
486 sc->sbands[IEEE80211_BAND_5GHZ].channels = ath9k_5ghz_chantable;
487 sc->sbands[IEEE80211_BAND_5GHZ].band = IEEE80211_BAND_5GHZ;
488 sc->sbands[IEEE80211_BAND_5GHZ].n_channels =
489 ARRAY_SIZE(ath9k_5ghz_chantable);
490 sc->sbands[IEEE80211_BAND_5GHZ].bitrates =
491 ath9k_legacy_rates + 4;
492 sc->sbands[IEEE80211_BAND_5GHZ].n_bitrates =
493 ARRAY_SIZE(ath9k_legacy_rates) - 4;
494 }
495}
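/*
 * Worked example (illustrative, not part of the original code): the
 * ath9k_legacy_rates table begins with the four CCK entries (1, 2, 5.5
 * and 11 Mb/s), which are valid only on 2.4 GHz.  The 5 GHz band
 * therefore starts its bitrate table at ath9k_legacy_rates + 4 and
 * advertises ARRAY_SIZE(ath9k_legacy_rates) - 4 = 8 OFDM rates
 * (6, 9, 12, 18, 24, 36, 48 and 54 Mb/s).
 */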
496
497static void ath9k_init_misc(struct ath_softc *sc)
498{
499 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
500 int i = 0;
501
502 common->ani.noise_floor = ATH_DEFAULT_NOISE_FLOOR;
503 setup_timer(&common->ani.timer, ath_ani_calibrate, (unsigned long)sc);
504
505 sc->config.txpowlimit = ATH_TXPOWER_MAX;
506
507 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_HT) {
508 sc->sc_flags |= SC_OP_TXAGGR;
509 sc->sc_flags |= SC_OP_RXAGGR;
510 }
511
512 common->tx_chainmask = sc->sc_ah->caps.tx_chainmask;
513 common->rx_chainmask = sc->sc_ah->caps.rx_chainmask;
514
515 ath9k_hw_setcapability(sc->sc_ah, ATH9K_CAP_DIVERSITY, 1, true, NULL);
516 sc->rx.defant = ath9k_hw_getdefantenna(sc->sc_ah);
517
518 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_BSSIDMASK)
519 memcpy(common->bssidmask, ath_bcast_mac, ETH_ALEN);
520
521 sc->beacon.slottime = ATH9K_SLOT_TIME_9;
522
523 for (i = 0; i < ARRAY_SIZE(sc->beacon.bslot); i++) {
524 sc->beacon.bslot[i] = NULL;
525 sc->beacon.bslot_aphy[i] = NULL;
526 }
527}
528
529static int ath9k_init_softc(u16 devid, struct ath_softc *sc, u16 subsysid,
530 const struct ath_bus_ops *bus_ops)
531{
532 struct ath_hw *ah = NULL;
533 struct ath_common *common;
534 int ret = 0, i;
535 int csz = 0;
536
537 ah = kzalloc(sizeof(struct ath_hw), GFP_KERNEL);
538 if (!ah)
539 return -ENOMEM;
540
541 ah->hw_version.devid = devid;
542 ah->hw_version.subsysid = subsysid;
543 sc->sc_ah = ah;
544
545 common = ath9k_hw_common(ah);
546 common->ops = &ath9k_common_ops;
547 common->bus_ops = bus_ops;
548 common->ah = ah;
549 common->hw = sc->hw;
550 common->priv = sc;
551 common->debug_mask = ath9k_debug;
552
553 spin_lock_init(&sc->wiphy_lock);
554 spin_lock_init(&sc->sc_resetlock);
555 spin_lock_init(&sc->sc_serial_rw);
556 spin_lock_init(&sc->sc_pm_lock);
557 mutex_init(&sc->mutex);
558 tasklet_init(&sc->intr_tq, ath9k_tasklet, (unsigned long)sc);
559 tasklet_init(&sc->bcon_tasklet, ath_beacon_tasklet,
560 (unsigned long)sc);
561
562 /*
563 * Cache line size is used to size and align various
564 * structures used to communicate with the hardware.
565 */
566 ath_read_cachesize(common, &csz);
567 common->cachelsz = csz << 2; /* convert to bytes */
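	/*
	 * Worked example (illustrative, value assumed): the bus reports the
	 * cache line size in 32-bit words, so a typical csz of 16 would give
	 * common->cachelsz = 16 << 2 = 64 bytes.
	 */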
568
569 ret = ath9k_hw_init(ah);
570 if (ret) {
571 ath_print(common, ATH_DBG_FATAL,
572 "Unable to initialize hardware; "
573 "initialization status: %d\n", ret);
574 goto err_hw;
575 }
576
577 ret = ath9k_init_debug(ah);
578 if (ret) {
579 ath_print(common, ATH_DBG_FATAL,
580 "Unable to create debugfs files\n");
581 goto err_debug;
582 }
583
584 ret = ath9k_init_queues(sc);
585 if (ret)
586 goto err_queues;
587
588 ret = ath9k_init_btcoex(sc);
589 if (ret)
590 goto err_btcoex;
591
592 ath9k_init_crypto(sc);
593 ath9k_init_channels_rates(sc);
594 ath9k_init_misc(sc);
595
596 return 0;
597
598err_btcoex:
599 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
600 if (ATH_TXQ_SETUP(sc, i))
601 ath_tx_cleanupq(sc, &sc->tx.txq[i]);
602err_queues:
603 ath9k_exit_debug(ah);
604err_debug:
605 ath9k_hw_deinit(ah);
606err_hw:
607 tasklet_kill(&sc->intr_tq);
608 tasklet_kill(&sc->bcon_tasklet);
609
610 kfree(ah);
611 sc->sc_ah = NULL;
612
613 return ret;
614}
615
616void ath9k_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw)
617{
618 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
619
620 hw->flags = IEEE80211_HW_RX_INCLUDES_FCS |
621 IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING |
622 IEEE80211_HW_SIGNAL_DBM |
623 IEEE80211_HW_AMPDU_AGGREGATION |
624 IEEE80211_HW_SUPPORTS_PS |
625 IEEE80211_HW_PS_NULLFUNC_STACK |
626 IEEE80211_HW_SPECTRUM_MGMT;
627
628 if (AR_SREV_9160_10_OR_LATER(sc->sc_ah) || modparam_nohwcrypt)
629 hw->flags |= IEEE80211_HW_MFP_CAPABLE;
630
631 hw->wiphy->interface_modes =
632 BIT(NL80211_IFTYPE_AP) |
633 BIT(NL80211_IFTYPE_STATION) |
634 BIT(NL80211_IFTYPE_ADHOC) |
635 BIT(NL80211_IFTYPE_MESH_POINT);
636
637 hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
638
639 hw->queues = 4;
640 hw->max_rates = 4;
641 hw->channel_change_time = 5000;
642 hw->max_listen_interval = 10;
643 /* Hardware supports 10 but we use 4 */
644 hw->max_rate_tries = 4;
645 hw->sta_data_size = sizeof(struct ath_node);
646 hw->vif_data_size = sizeof(struct ath_vif);
647
648 hw->rate_control_algorithm = "ath9k_rate_control";
649
650 if (test_bit(ATH9K_MODE_11G, sc->sc_ah->caps.wireless_modes))
651 hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
652 &sc->sbands[IEEE80211_BAND_2GHZ];
653 if (test_bit(ATH9K_MODE_11A, sc->sc_ah->caps.wireless_modes))
654 hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
655 &sc->sbands[IEEE80211_BAND_5GHZ];
656
657 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_HT) {
658 if (test_bit(ATH9K_MODE_11G, sc->sc_ah->caps.wireless_modes))
659 setup_ht_cap(sc, &sc->sbands[IEEE80211_BAND_2GHZ].ht_cap);
660 if (test_bit(ATH9K_MODE_11A, sc->sc_ah->caps.wireless_modes))
661 setup_ht_cap(sc, &sc->sbands[IEEE80211_BAND_5GHZ].ht_cap);
662 }
663
664 SET_IEEE80211_PERM_ADDR(hw, common->macaddr);
665}
666
667int ath9k_init_device(u16 devid, struct ath_softc *sc, u16 subsysid,
668 const struct ath_bus_ops *bus_ops)
669{
670 struct ieee80211_hw *hw = sc->hw;
671 struct ath_common *common;
672 struct ath_hw *ah;
673 int error = 0;
674 struct ath_regulatory *reg;
675
676 /* Bring up device */
677 error = ath9k_init_softc(devid, sc, subsysid, bus_ops);
678 if (error != 0)
679 goto error_init;
680
681 ah = sc->sc_ah;
682 common = ath9k_hw_common(ah);
683 ath9k_set_hw_capab(sc, hw);
684
685 /* Initialize regulatory */
686 error = ath_regd_init(&common->regulatory, sc->hw->wiphy,
687 ath9k_reg_notifier);
688 if (error)
689 goto error_regd;
690
691 reg = &common->regulatory;
692
693 /* Setup TX DMA */
694 error = ath_tx_init(sc, ATH_TXBUF);
695 if (error != 0)
696 goto error_tx;
697
698 /* Setup RX DMA */
699 error = ath_rx_init(sc, ATH_RXBUF);
700 if (error != 0)
701 goto error_rx;
702
703 /* Register with mac80211 */
704 error = ieee80211_register_hw(hw);
705 if (error)
706 goto error_register;
707
708 /* Handle world regulatory */
709 if (!ath_is_world_regd(reg)) {
710 error = regulatory_hint(hw->wiphy, reg->alpha2);
711 if (error)
712 goto error_world;
713 }
714
715 INIT_WORK(&sc->chan_work, ath9k_wiphy_chan_work);
716 INIT_DELAYED_WORK(&sc->wiphy_work, ath9k_wiphy_work);
717 sc->wiphy_scheduler_int = msecs_to_jiffies(500);
718
719 ath_init_leds(sc);
720 ath_start_rfkill_poll(sc);
721
722 return 0;
723
724error_world:
725 ieee80211_unregister_hw(hw);
726error_register:
727 ath_rx_cleanup(sc);
728error_rx:
729 ath_tx_cleanup(sc);
730error_tx:
731 /* Nothing */
732error_regd:
733 ath9k_deinit_softc(sc);
734error_init:
735 return error;
736}
737
738/*****************************/
739/* De-Initialization */
740/*****************************/
741
742static void ath9k_deinit_softc(struct ath_softc *sc)
743{
744 int i = 0;
745
746 if ((sc->btcoex.no_stomp_timer) &&
747 sc->sc_ah->btcoex_hw.scheme == ATH_BTCOEX_CFG_3WIRE)
748 ath_gen_timer_free(sc->sc_ah, sc->btcoex.no_stomp_timer);
749
750 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
751 if (ATH_TXQ_SETUP(sc, i))
752 ath_tx_cleanupq(sc, &sc->tx.txq[i]);
753
754 ath9k_exit_debug(sc->sc_ah);
755 ath9k_hw_deinit(sc->sc_ah);
756
757 tasklet_kill(&sc->intr_tq);
758 tasklet_kill(&sc->bcon_tasklet);
759}
760
761void ath9k_deinit_device(struct ath_softc *sc)
762{
763 struct ieee80211_hw *hw = sc->hw;
764 int i = 0;
765
766 ath9k_ps_wakeup(sc);
767
768 wiphy_rfkill_stop_polling(sc->hw->wiphy);
769 ath_deinit_leds(sc);
770
771 for (i = 0; i < sc->num_sec_wiphy; i++) {
772 struct ath_wiphy *aphy = sc->sec_wiphy[i];
773 if (aphy == NULL)
774 continue;
775 sc->sec_wiphy[i] = NULL;
776 ieee80211_unregister_hw(aphy->hw);
777 ieee80211_free_hw(aphy->hw);
778 }
779 kfree(sc->sec_wiphy);
780
781 ieee80211_unregister_hw(hw);
782 ath_rx_cleanup(sc);
783 ath_tx_cleanup(sc);
784 ath9k_deinit_softc(sc);
785}
786
787void ath_descdma_cleanup(struct ath_softc *sc,
788 struct ath_descdma *dd,
789 struct list_head *head)
790{
791 dma_free_coherent(sc->dev, dd->dd_desc_len, dd->dd_desc,
792 dd->dd_desc_paddr);
793
794 INIT_LIST_HEAD(head);
795 kfree(dd->dd_bufptr);
796 memset(dd, 0, sizeof(*dd));
797}
798
799/************************/
800/* Module Hooks */
801/************************/
802
803static int __init ath9k_init(void)
804{
805 int error;
806
807 /* Register rate control algorithm */
808 error = ath_rate_control_register();
809 if (error != 0) {
810 printk(KERN_ERR
811 "ath9k: Unable to register rate control "
812 "algorithm: %d\n",
813 error);
814 goto err_out;
815 }
816
817 error = ath9k_debug_create_root();
818 if (error) {
819 printk(KERN_ERR
820 "ath9k: Unable to create debugfs root: %d\n",
821 error);
822 goto err_rate_unregister;
823 }
824
825 error = ath_pci_init();
826 if (error < 0) {
827 printk(KERN_ERR
828 "ath9k: No PCI devices found, driver not installed.\n");
829 error = -ENODEV;
830 goto err_remove_root;
831 }
832
833 error = ath_ahb_init();
834 if (error < 0) {
835 error = -ENODEV;
836 goto err_pci_exit;
837 }
838
839 return 0;
840
841 err_pci_exit:
842 ath_pci_exit();
843
844 err_remove_root:
845 ath9k_debug_remove_root();
846 err_rate_unregister:
847 ath_rate_control_unregister();
848 err_out:
849 return error;
850}
851module_init(ath9k_init);
852
853static void __exit ath9k_exit(void)
854{
855 ath_ahb_exit();
856 ath_pci_exit();
857 ath9k_debug_remove_root();
858 ath_rate_control_unregister();
859 printk(KERN_INFO "%s: Driver unloaded\n", dev_info);
860}
861module_exit(ath9k_exit);
diff --git a/drivers/net/wireless/ath/ath9k/mac.h b/drivers/net/wireless/ath/ath9k/mac.h
index e185479e295..29851e6376a 100644
--- a/drivers/net/wireless/ath/ath9k/mac.h
+++ b/drivers/net/wireless/ath/ath9k/mac.h
@@ -167,6 +167,40 @@ struct ath_rx_status {
167#define ATH9K_RXKEYIX_INVALID ((u8)-1) 167#define ATH9K_RXKEYIX_INVALID ((u8)-1)
168#define ATH9K_TXKEYIX_INVALID ((u32)-1) 168#define ATH9K_TXKEYIX_INVALID ((u32)-1)
169 169
170enum ath9k_phyerr {
171 ATH9K_PHYERR_UNDERRUN = 0, /* Transmit underrun */
172 ATH9K_PHYERR_TIMING = 1, /* Timing error */
173 ATH9K_PHYERR_PARITY = 2, /* Illegal parity */
174 ATH9K_PHYERR_RATE = 3, /* Illegal rate */
175 ATH9K_PHYERR_LENGTH = 4, /* Illegal length */
176 ATH9K_PHYERR_RADAR = 5, /* Radar detect */
177 ATH9K_PHYERR_SERVICE = 6, /* Illegal service */
178 ATH9K_PHYERR_TOR = 7, /* Transmit override receive */
179
180 ATH9K_PHYERR_OFDM_TIMING = 17,
181 ATH9K_PHYERR_OFDM_SIGNAL_PARITY = 18,
182 ATH9K_PHYERR_OFDM_RATE_ILLEGAL = 19,
183 ATH9K_PHYERR_OFDM_LENGTH_ILLEGAL = 20,
184 ATH9K_PHYERR_OFDM_POWER_DROP = 21,
185 ATH9K_PHYERR_OFDM_SERVICE = 22,
186 ATH9K_PHYERR_OFDM_RESTART = 23,
187 ATH9K_PHYERR_FALSE_RADAR_EXT = 24,
188
189 ATH9K_PHYERR_CCK_TIMING = 25,
190 ATH9K_PHYERR_CCK_HEADER_CRC = 26,
191 ATH9K_PHYERR_CCK_RATE_ILLEGAL = 27,
192 ATH9K_PHYERR_CCK_SERVICE = 30,
193 ATH9K_PHYERR_CCK_RESTART = 31,
194 ATH9K_PHYERR_CCK_LENGTH_ILLEGAL = 32,
195 ATH9K_PHYERR_CCK_POWER_DROP = 33,
196
197 ATH9K_PHYERR_HT_CRC_ERROR = 34,
198 ATH9K_PHYERR_HT_LENGTH_ILLEGAL = 35,
199 ATH9K_PHYERR_HT_RATE_ILLEGAL = 36,
200
201 ATH9K_PHYERR_MAX = 37,
202};
203
170struct ath_desc { 204struct ath_desc {
171 u32 ds_link; 205 u32 ds_link;
172 u32 ds_data; 206 u32 ds_data;
diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
index 643bea35686..6aaca0026da 100644
--- a/drivers/net/wireless/ath/ath9k/main.c
+++ b/drivers/net/wireless/ath/ath9k/main.c
@@ -18,118 +18,6 @@
18#include "ath9k.h" 18#include "ath9k.h"
19#include "btcoex.h" 19#include "btcoex.h"
20 20
21static char *dev_info = "ath9k";
22
23MODULE_AUTHOR("Atheros Communications");
24MODULE_DESCRIPTION("Support for Atheros 802.11n wireless LAN cards.");
25MODULE_SUPPORTED_DEVICE("Atheros 802.11n WLAN cards");
26MODULE_LICENSE("Dual BSD/GPL");
27
28static int modparam_nohwcrypt;
29module_param_named(nohwcrypt, modparam_nohwcrypt, int, 0444);
30MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption");
31
32static unsigned int ath9k_debug = ATH_DBG_DEFAULT;
33module_param_named(debug, ath9k_debug, uint, 0);
34MODULE_PARM_DESC(debug, "Debugging mask");
35
36/* We use the hw_value as an index into our private channel structure */
37
38#define CHAN2G(_freq, _idx) { \
39 .center_freq = (_freq), \
40 .hw_value = (_idx), \
41 .max_power = 20, \
42}
43
44#define CHAN5G(_freq, _idx) { \
45 .band = IEEE80211_BAND_5GHZ, \
46 .center_freq = (_freq), \
47 .hw_value = (_idx), \
48 .max_power = 20, \
49}
50
51/* Some 2 GHz radios are actually tunable on 2312-2732 MHz
52 * in 5 MHz steps; to keep this table static we only support
53 * the channels for which we know we have calibration data
54 * on all cards. */
55static struct ieee80211_channel ath9k_2ghz_chantable[] = {
56 CHAN2G(2412, 0), /* Channel 1 */
57 CHAN2G(2417, 1), /* Channel 2 */
58 CHAN2G(2422, 2), /* Channel 3 */
59 CHAN2G(2427, 3), /* Channel 4 */
60 CHAN2G(2432, 4), /* Channel 5 */
61 CHAN2G(2437, 5), /* Channel 6 */
62 CHAN2G(2442, 6), /* Channel 7 */
63 CHAN2G(2447, 7), /* Channel 8 */
64 CHAN2G(2452, 8), /* Channel 9 */
65 CHAN2G(2457, 9), /* Channel 10 */
66 CHAN2G(2462, 10), /* Channel 11 */
67 CHAN2G(2467, 11), /* Channel 12 */
68 CHAN2G(2472, 12), /* Channel 13 */
69 CHAN2G(2484, 13), /* Channel 14 */
70};
71
72/* Some 5 GHz radios are actually tunable on XXXX-YYYY
73 * in 5 MHz steps; to keep this table static we only support
74 * the channels for which we know we have calibration data
75 * on all cards. */
76static struct ieee80211_channel ath9k_5ghz_chantable[] = {
77 /* _We_ call this UNII 1 */
78 CHAN5G(5180, 14), /* Channel 36 */
79 CHAN5G(5200, 15), /* Channel 40 */
80 CHAN5G(5220, 16), /* Channel 44 */
81 CHAN5G(5240, 17), /* Channel 48 */
82 /* _We_ call this UNII 2 */
83 CHAN5G(5260, 18), /* Channel 52 */
84 CHAN5G(5280, 19), /* Channel 56 */
85 CHAN5G(5300, 20), /* Channel 60 */
86 CHAN5G(5320, 21), /* Channel 64 */
87 /* _We_ call this "Middle band" */
88 CHAN5G(5500, 22), /* Channel 100 */
89 CHAN5G(5520, 23), /* Channel 104 */
90 CHAN5G(5540, 24), /* Channel 108 */
91 CHAN5G(5560, 25), /* Channel 112 */
92 CHAN5G(5580, 26), /* Channel 116 */
93 CHAN5G(5600, 27), /* Channel 120 */
94 CHAN5G(5620, 28), /* Channel 124 */
95 CHAN5G(5640, 29), /* Channel 128 */
96 CHAN5G(5660, 30), /* Channel 132 */
97 CHAN5G(5680, 31), /* Channel 136 */
98 CHAN5G(5700, 32), /* Channel 140 */
99 /* _We_ call this UNII 3 */
100 CHAN5G(5745, 33), /* Channel 149 */
101 CHAN5G(5765, 34), /* Channel 153 */
102 CHAN5G(5785, 35), /* Channel 157 */
103 CHAN5G(5805, 36), /* Channel 161 */
104 CHAN5G(5825, 37), /* Channel 165 */
105};
106
107/* Atheros hardware rate code addition for short preamble */
108#define SHPCHECK(__hw_rate, __flags) \
109 ((__flags & IEEE80211_RATE_SHORT_PREAMBLE) ? (__hw_rate | 0x04 ) : 0)
110
111#define RATE(_bitrate, _hw_rate, _flags) { \
112 .bitrate = (_bitrate), \
113 .flags = (_flags), \
114 .hw_value = (_hw_rate), \
115 .hw_value_short = (SHPCHECK(_hw_rate, _flags)) \
116}
117
118static struct ieee80211_rate ath9k_legacy_rates[] = {
119 RATE(10, 0x1b, 0),
120 RATE(20, 0x1a, IEEE80211_RATE_SHORT_PREAMBLE),
121 RATE(55, 0x19, IEEE80211_RATE_SHORT_PREAMBLE),
122 RATE(110, 0x18, IEEE80211_RATE_SHORT_PREAMBLE),
123 RATE(60, 0x0b, 0),
124 RATE(90, 0x0f, 0),
125 RATE(120, 0x0a, 0),
126 RATE(180, 0x0e, 0),
127 RATE(240, 0x09, 0),
128 RATE(360, 0x0d, 0),
129 RATE(480, 0x08, 0),
130 RATE(540, 0x0c, 0),
131};
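/*
 * Worked example (illustrative): for the 2 Mb/s entry RATE(20, 0x1a,
 * IEEE80211_RATE_SHORT_PREAMBLE), SHPCHECK yields hw_value_short =
 * 0x1a | 0x04 = 0x1e, while entries without the short-preamble flag
 * (the 1 Mb/s CCK rate and all OFDM rates) keep hw_value_short = 0.
 */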
132
133static void ath_cache_conf_rate(struct ath_softc *sc, 21static void ath_cache_conf_rate(struct ath_softc *sc,
134 struct ieee80211_conf *conf) 22 struct ieee80211_conf *conf)
135{ 23{
@@ -221,7 +109,7 @@ static struct ath9k_channel *ath_get_curchannel(struct ath_softc *sc,
221 return channel; 109 return channel;
222} 110}
223 111
224static bool ath9k_setpower(struct ath_softc *sc, enum ath9k_power_mode mode) 112bool ath9k_setpower(struct ath_softc *sc, enum ath9k_power_mode mode)
225{ 113{
226 unsigned long flags; 114 unsigned long flags;
227 bool ret; 115 bool ret;
@@ -256,10 +144,10 @@ void ath9k_ps_restore(struct ath_softc *sc)
256 goto unlock; 144 goto unlock;
257 145
258 if (sc->ps_enabled && 146 if (sc->ps_enabled &&
259 !(sc->sc_flags & (SC_OP_WAIT_FOR_BEACON | 147 !(sc->ps_flags & (PS_WAIT_FOR_BEACON |
260 SC_OP_WAIT_FOR_CAB | 148 PS_WAIT_FOR_CAB |
261 SC_OP_WAIT_FOR_PSPOLL_DATA | 149 PS_WAIT_FOR_PSPOLL_DATA |
262 SC_OP_WAIT_FOR_TX_ACK))) 150 PS_WAIT_FOR_TX_ACK)))
263 ath9k_hw_setpower(sc->sc_ah, ATH9K_PM_NETWORK_SLEEP); 151 ath9k_hw_setpower(sc->sc_ah, ATH9K_PM_NETWORK_SLEEP);
264 152
265 unlock: 153 unlock:
@@ -349,7 +237,7 @@ int ath_set_channel(struct ath_softc *sc, struct ieee80211_hw *hw,
349 * When the task is complete, it reschedules itself depending on the 237 * When the task is complete, it reschedules itself depending on the
350 * appropriate interval that was calculated. 238 * appropriate interval that was calculated.
351 */ 239 */
352static void ath_ani_calibrate(unsigned long data) 240void ath_ani_calibrate(unsigned long data)
353{ 241{
354 struct ath_softc *sc = (struct ath_softc *)data; 242 struct ath_softc *sc = (struct ath_softc *)data;
355 struct ath_hw *ah = sc->sc_ah; 243 struct ath_hw *ah = sc->sc_ah;
@@ -363,14 +251,6 @@ static void ath_ani_calibrate(unsigned long data)
363 short_cal_interval = (ah->opmode == NL80211_IFTYPE_AP) ? 251 short_cal_interval = (ah->opmode == NL80211_IFTYPE_AP) ?
364 ATH_AP_SHORT_CALINTERVAL : ATH_STA_SHORT_CALINTERVAL; 252 ATH_AP_SHORT_CALINTERVAL : ATH_STA_SHORT_CALINTERVAL;
365 253
366 /*
367 * don't calibrate when we're scanning.
368 * we are most likely not on our home channel.
369 */
370 spin_lock(&sc->ani_lock);
371 if (sc->sc_flags & SC_OP_SCANNING)
372 goto set_timer;
373
374 /* Only calibrate if awake */ 254 /* Only calibrate if awake */
375 if (sc->sc_ah->power_mode != ATH9K_PM_AWAKE) 255 if (sc->sc_ah->power_mode != ATH9K_PM_AWAKE)
376 goto set_timer; 256 goto set_timer;
@@ -437,7 +317,6 @@ static void ath_ani_calibrate(unsigned long data)
437 ath9k_ps_restore(sc); 317 ath9k_ps_restore(sc);
438 318
439set_timer: 319set_timer:
440 spin_unlock(&sc->ani_lock);
441 /* 320 /*
442 * Set timer interval based on previous results. 321 * Set timer interval based on previous results.
443 * The interval must be the shortest necessary to satisfy ANI, 322 * The interval must be the shortest necessary to satisfy ANI,
@@ -513,7 +392,7 @@ static void ath_node_detach(struct ath_softc *sc, struct ieee80211_sta *sta)
513 ath_tx_node_cleanup(sc, an); 392 ath_tx_node_cleanup(sc, an);
514} 393}
515 394
516static void ath9k_tasklet(unsigned long data) 395void ath9k_tasklet(unsigned long data)
517{ 396{
518 struct ath_softc *sc = (struct ath_softc *)data; 397 struct ath_softc *sc = (struct ath_softc *)data;
519 struct ath_hw *ah = sc->sc_ah; 398 struct ath_hw *ah = sc->sc_ah;
@@ -545,7 +424,7 @@ static void ath9k_tasklet(unsigned long data)
545 */ 424 */
546 ath_print(common, ATH_DBG_PS, 425 ath_print(common, ATH_DBG_PS,
547 "TSFOOR - Sync with next Beacon\n"); 426 "TSFOOR - Sync with next Beacon\n");
548 sc->sc_flags |= SC_OP_WAIT_FOR_BEACON | SC_OP_BEACON_SYNC; 427 sc->ps_flags |= PS_WAIT_FOR_BEACON | PS_BEACON_SYNC;
549 } 428 }
550 429
551 if (ah->btcoex_hw.scheme == ATH_BTCOEX_CFG_3WIRE) 430 if (ah->btcoex_hw.scheme == ATH_BTCOEX_CFG_3WIRE)
@@ -646,7 +525,7 @@ irqreturn_t ath_isr(int irq, void *dev)
646 * receive frames */ 525 * receive frames */
647 ath9k_setpower(sc, ATH9K_PM_AWAKE); 526 ath9k_setpower(sc, ATH9K_PM_AWAKE);
648 ath9k_hw_setrxabort(sc->sc_ah, 0); 527 ath9k_hw_setrxabort(sc->sc_ah, 0);
649 sc->sc_flags |= SC_OP_WAIT_FOR_BEACON; 528 sc->ps_flags |= PS_WAIT_FOR_BEACON;
650 } 529 }
651 530
652chip_reset: 531chip_reset:
@@ -933,44 +812,6 @@ static void ath_key_delete(struct ath_common *common, struct ieee80211_key_conf
933 } 812 }
934} 813}
935 814
936static void setup_ht_cap(struct ath_softc *sc,
937 struct ieee80211_sta_ht_cap *ht_info)
938{
939 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
940 u8 tx_streams, rx_streams;
941
942 ht_info->ht_supported = true;
943 ht_info->cap = IEEE80211_HT_CAP_SUP_WIDTH_20_40 |
944 IEEE80211_HT_CAP_SM_PS |
945 IEEE80211_HT_CAP_SGI_40 |
946 IEEE80211_HT_CAP_DSSSCCK40;
947
948 ht_info->ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K;
949 ht_info->ampdu_density = IEEE80211_HT_MPDU_DENSITY_8;
950
951 /* set up supported mcs set */
952 memset(&ht_info->mcs, 0, sizeof(ht_info->mcs));
953 tx_streams = !(common->tx_chainmask & (common->tx_chainmask - 1)) ?
954 1 : 2;
955 rx_streams = !(common->rx_chainmask & (common->rx_chainmask - 1)) ?
956 1 : 2;
957
958 if (tx_streams != rx_streams) {
959 ath_print(common, ATH_DBG_CONFIG,
960 "TX streams %d, RX streams: %d\n",
961 tx_streams, rx_streams);
962 ht_info->mcs.tx_params |= IEEE80211_HT_MCS_TX_RX_DIFF;
963 ht_info->mcs.tx_params |= ((tx_streams - 1) <<
964 IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT);
965 }
966
967 ht_info->mcs.rx_mask[0] = 0xff;
968 if (rx_streams >= 2)
969 ht_info->mcs.rx_mask[1] = 0xff;
970
971 ht_info->mcs.tx_params |= IEEE80211_HT_MCS_TX_DEFINED;
972}
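/*
 * Illustration (not part of the original code): the chainmask test above
 * is the usual power-of-two check; !(x & (x - 1)) is true when at most
 * one chain bit is set.  For example, a chainmask of 0x1 or 0x2 gives
 * 1 stream, while 0x3 or 0x5 (two chains enabled) gives 2 streams; with
 * 2 RX streams rx_mask[1] is also set, and the TX_MAX_STREAMS field is
 * filled in only when the TX and RX stream counts differ.
 */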
973
974static void ath9k_bss_assoc_info(struct ath_softc *sc, 815static void ath9k_bss_assoc_info(struct ath_softc *sc,
975 struct ieee80211_vif *vif, 816 struct ieee80211_vif *vif,
976 struct ieee80211_bss_conf *bss_conf) 817 struct ieee80211_bss_conf *bss_conf)
@@ -992,7 +833,7 @@ static void ath9k_bss_assoc_info(struct ath_softc *sc,
992 * on the receipt of the first Beacon frame (i.e., 833 * on the receipt of the first Beacon frame (i.e.,
993 * after time sync with the AP). 834 * after time sync with the AP).
994 */ 835 */
995 sc->sc_flags |= SC_OP_BEACON_SYNC; 836 sc->ps_flags |= PS_BEACON_SYNC;
996 837
997 /* Configure the beacon */ 838 /* Configure the beacon */
998 ath_beacon_config(sc, vif); 839 ath_beacon_config(sc, vif);
@@ -1009,174 +850,6 @@ static void ath9k_bss_assoc_info(struct ath_softc *sc,
1009 } 850 }
1010} 851}
1011 852
1012/********************************/
1013/* LED functions */
1014/********************************/
1015
1016static void ath_led_blink_work(struct work_struct *work)
1017{
1018 struct ath_softc *sc = container_of(work, struct ath_softc,
1019 ath_led_blink_work.work);
1020
1021 if (!(sc->sc_flags & SC_OP_LED_ASSOCIATED))
1022 return;
1023
1024 if ((sc->led_on_duration == ATH_LED_ON_DURATION_IDLE) ||
1025 (sc->led_off_duration == ATH_LED_OFF_DURATION_IDLE))
1026 ath9k_hw_set_gpio(sc->sc_ah, sc->sc_ah->led_pin, 0);
1027 else
1028 ath9k_hw_set_gpio(sc->sc_ah, sc->sc_ah->led_pin,
1029 (sc->sc_flags & SC_OP_LED_ON) ? 1 : 0);
1030
1031 ieee80211_queue_delayed_work(sc->hw,
1032 &sc->ath_led_blink_work,
1033 (sc->sc_flags & SC_OP_LED_ON) ?
1034 msecs_to_jiffies(sc->led_off_duration) :
1035 msecs_to_jiffies(sc->led_on_duration));
1036
1037 sc->led_on_duration = sc->led_on_cnt ?
1038 max((ATH_LED_ON_DURATION_IDLE - sc->led_on_cnt), 25) :
1039 ATH_LED_ON_DURATION_IDLE;
1040 sc->led_off_duration = sc->led_off_cnt ?
1041 max((ATH_LED_OFF_DURATION_IDLE - sc->led_off_cnt), 10) :
1042 ATH_LED_OFF_DURATION_IDLE;
1043 sc->led_on_cnt = sc->led_off_cnt = 0;
1044 if (sc->sc_flags & SC_OP_LED_ON)
1045 sc->sc_flags &= ~SC_OP_LED_ON;
1046 else
1047 sc->sc_flags |= SC_OP_LED_ON;
1048}
1049
1050static void ath_led_brightness(struct led_classdev *led_cdev,
1051 enum led_brightness brightness)
1052{
1053 struct ath_led *led = container_of(led_cdev, struct ath_led, led_cdev);
1054 struct ath_softc *sc = led->sc;
1055
1056 switch (brightness) {
1057 case LED_OFF:
1058 if (led->led_type == ATH_LED_ASSOC ||
1059 led->led_type == ATH_LED_RADIO) {
1060 ath9k_hw_set_gpio(sc->sc_ah, sc->sc_ah->led_pin,
1061 (led->led_type == ATH_LED_RADIO));
1062 sc->sc_flags &= ~SC_OP_LED_ASSOCIATED;
1063 if (led->led_type == ATH_LED_RADIO)
1064 sc->sc_flags &= ~SC_OP_LED_ON;
1065 } else {
1066 sc->led_off_cnt++;
1067 }
1068 break;
1069 case LED_FULL:
1070 if (led->led_type == ATH_LED_ASSOC) {
1071 sc->sc_flags |= SC_OP_LED_ASSOCIATED;
1072 ieee80211_queue_delayed_work(sc->hw,
1073 &sc->ath_led_blink_work, 0);
1074 } else if (led->led_type == ATH_LED_RADIO) {
1075 ath9k_hw_set_gpio(sc->sc_ah, sc->sc_ah->led_pin, 0);
1076 sc->sc_flags |= SC_OP_LED_ON;
1077 } else {
1078 sc->led_on_cnt++;
1079 }
1080 break;
1081 default:
1082 break;
1083 }
1084}
1085
1086static int ath_register_led(struct ath_softc *sc, struct ath_led *led,
1087 char *trigger)
1088{
1089 int ret;
1090
1091 led->sc = sc;
1092 led->led_cdev.name = led->name;
1093 led->led_cdev.default_trigger = trigger;
1094 led->led_cdev.brightness_set = ath_led_brightness;
1095
1096 ret = led_classdev_register(wiphy_dev(sc->hw->wiphy), &led->led_cdev);
1097 if (ret)
1098 ath_print(ath9k_hw_common(sc->sc_ah), ATH_DBG_FATAL,
1099 "Failed to register led:%s", led->name);
1100 else
1101 led->registered = 1;
1102 return ret;
1103}
1104
1105static void ath_unregister_led(struct ath_led *led)
1106{
1107 if (led->registered) {
1108 led_classdev_unregister(&led->led_cdev);
1109 led->registered = 0;
1110 }
1111}
1112
1113static void ath_deinit_leds(struct ath_softc *sc)
1114{
1115 ath_unregister_led(&sc->assoc_led);
1116 sc->sc_flags &= ~SC_OP_LED_ASSOCIATED;
1117 ath_unregister_led(&sc->tx_led);
1118 ath_unregister_led(&sc->rx_led);
1119 ath_unregister_led(&sc->radio_led);
1120 ath9k_hw_set_gpio(sc->sc_ah, sc->sc_ah->led_pin, 1);
1121}
1122
1123static void ath_init_leds(struct ath_softc *sc)
1124{
1125 char *trigger;
1126 int ret;
1127
1128 if (AR_SREV_9287(sc->sc_ah))
1129 sc->sc_ah->led_pin = ATH_LED_PIN_9287;
1130 else
1131 sc->sc_ah->led_pin = ATH_LED_PIN_DEF;
1132
1133 /* Configure gpio 1 for output */
1134 ath9k_hw_cfg_output(sc->sc_ah, sc->sc_ah->led_pin,
1135 AR_GPIO_OUTPUT_MUX_AS_OUTPUT);
1136 /* LED off, active low */
1137 ath9k_hw_set_gpio(sc->sc_ah, sc->sc_ah->led_pin, 1);
1138
1139 INIT_DELAYED_WORK(&sc->ath_led_blink_work, ath_led_blink_work);
1140
1141 trigger = ieee80211_get_radio_led_name(sc->hw);
1142 snprintf(sc->radio_led.name, sizeof(sc->radio_led.name),
1143 "ath9k-%s::radio", wiphy_name(sc->hw->wiphy));
1144 ret = ath_register_led(sc, &sc->radio_led, trigger);
1145 sc->radio_led.led_type = ATH_LED_RADIO;
1146 if (ret)
1147 goto fail;
1148
1149 trigger = ieee80211_get_assoc_led_name(sc->hw);
1150 snprintf(sc->assoc_led.name, sizeof(sc->assoc_led.name),
1151 "ath9k-%s::assoc", wiphy_name(sc->hw->wiphy));
1152 ret = ath_register_led(sc, &sc->assoc_led, trigger);
1153 sc->assoc_led.led_type = ATH_LED_ASSOC;
1154 if (ret)
1155 goto fail;
1156
1157 trigger = ieee80211_get_tx_led_name(sc->hw);
1158 snprintf(sc->tx_led.name, sizeof(sc->tx_led.name),
1159 "ath9k-%s::tx", wiphy_name(sc->hw->wiphy));
1160 ret = ath_register_led(sc, &sc->tx_led, trigger);
1161 sc->tx_led.led_type = ATH_LED_TX;
1162 if (ret)
1163 goto fail;
1164
1165 trigger = ieee80211_get_rx_led_name(sc->hw);
1166 snprintf(sc->rx_led.name, sizeof(sc->rx_led.name),
1167 "ath9k-%s::rx", wiphy_name(sc->hw->wiphy));
1168 ret = ath_register_led(sc, &sc->rx_led, trigger);
1169 sc->rx_led.led_type = ATH_LED_RX;
1170 if (ret)
1171 goto fail;
1172
1173 return;
1174
1175fail:
1176 cancel_delayed_work_sync(&sc->ath_led_blink_work);
1177 ath_deinit_leds(sc);
1178}
1179
1180void ath_radio_enable(struct ath_softc *sc, struct ieee80211_hw *hw) 853void ath_radio_enable(struct ath_softc *sc, struct ieee80211_hw *hw)
1181{ 854{
1182 struct ath_hw *ah = sc->sc_ah; 855 struct ath_hw *ah = sc->sc_ah;
@@ -1261,711 +934,6 @@ void ath_radio_disable(struct ath_softc *sc, struct ieee80211_hw *hw)
1261 ath9k_setpower(sc, ATH9K_PM_FULL_SLEEP); 934 ath9k_setpower(sc, ATH9K_PM_FULL_SLEEP);
1262} 935}
1263 936
1264/*******************/
1265/* Rfkill */
1266/*******************/
1267
1268static bool ath_is_rfkill_set(struct ath_softc *sc)
1269{
1270 struct ath_hw *ah = sc->sc_ah;
1271
1272 return ath9k_hw_gpio_get(ah, ah->rfkill_gpio) ==
1273 ah->rfkill_polarity;
1274}
1275
1276static void ath9k_rfkill_poll_state(struct ieee80211_hw *hw)
1277{
1278 struct ath_wiphy *aphy = hw->priv;
1279 struct ath_softc *sc = aphy->sc;
1280 bool blocked = !!ath_is_rfkill_set(sc);
1281
1282 wiphy_rfkill_set_hw_state(hw->wiphy, blocked);
1283}
1284
1285static void ath_start_rfkill_poll(struct ath_softc *sc)
1286{
1287 struct ath_hw *ah = sc->sc_ah;
1288
1289 if (ah->caps.hw_caps & ATH9K_HW_CAP_RFSILENT)
1290 wiphy_rfkill_start_polling(sc->hw->wiphy);
1291}
1292
1293static void ath9k_uninit_hw(struct ath_softc *sc)
1294{
1295 struct ath_hw *ah = sc->sc_ah;
1296
1297 BUG_ON(!ah);
1298
1299 ath9k_exit_debug(ah);
1300 ath9k_hw_detach(ah);
1301 sc->sc_ah = NULL;
1302}
1303
1304static void ath_clean_core(struct ath_softc *sc)
1305{
1306 struct ieee80211_hw *hw = sc->hw;
1307 struct ath_hw *ah = sc->sc_ah;
1308 int i = 0;
1309
1310 ath9k_ps_wakeup(sc);
1311
1312 dev_dbg(sc->dev, "Detach ATH hw\n");
1313
1314 ath_deinit_leds(sc);
1315 wiphy_rfkill_stop_polling(sc->hw->wiphy);
1316
1317 for (i = 0; i < sc->num_sec_wiphy; i++) {
1318 struct ath_wiphy *aphy = sc->sec_wiphy[i];
1319 if (aphy == NULL)
1320 continue;
1321 sc->sec_wiphy[i] = NULL;
1322 ieee80211_unregister_hw(aphy->hw);
1323 ieee80211_free_hw(aphy->hw);
1324 }
1325 ieee80211_unregister_hw(hw);
1326 ath_rx_cleanup(sc);
1327 ath_tx_cleanup(sc);
1328
1329 tasklet_kill(&sc->intr_tq);
1330 tasklet_kill(&sc->bcon_tasklet);
1331
1332 if (!(sc->sc_flags & SC_OP_INVALID))
1333 ath9k_setpower(sc, ATH9K_PM_AWAKE);
1334
1335 /* cleanup tx queues */
1336 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
1337 if (ATH_TXQ_SETUP(sc, i))
1338 ath_tx_cleanupq(sc, &sc->tx.txq[i]);
1339
1340 if ((sc->btcoex.no_stomp_timer) &&
1341 ah->btcoex_hw.scheme == ATH_BTCOEX_CFG_3WIRE)
1342 ath_gen_timer_free(ah, sc->btcoex.no_stomp_timer);
1343}
1344
1345void ath_detach(struct ath_softc *sc)
1346{
1347 ath_clean_core(sc);
1348 ath9k_uninit_hw(sc);
1349}
1350
1351void ath_cleanup(struct ath_softc *sc)
1352{
1353 struct ath_hw *ah = sc->sc_ah;
1354 struct ath_common *common = ath9k_hw_common(ah);
1355
1356 ath_clean_core(sc);
1357 free_irq(sc->irq, sc);
1358 ath_bus_cleanup(common);
1359 kfree(sc->sec_wiphy);
1360 ieee80211_free_hw(sc->hw);
1361
1362 ath9k_uninit_hw(sc);
1363}
1364
1365static int ath9k_reg_notifier(struct wiphy *wiphy,
1366 struct regulatory_request *request)
1367{
1368 struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
1369 struct ath_wiphy *aphy = hw->priv;
1370 struct ath_softc *sc = aphy->sc;
1371 struct ath_regulatory *reg = ath9k_hw_regulatory(sc->sc_ah);
1372
1373 return ath_reg_notifier_apply(wiphy, request, reg);
1374}
1375
1376/*
1377 * Detects if there is any priority bt traffic
1378 */
1379static void ath_detect_bt_priority(struct ath_softc *sc)
1380{
1381 struct ath_btcoex *btcoex = &sc->btcoex;
1382 struct ath_hw *ah = sc->sc_ah;
1383
1384 if (ath9k_hw_gpio_get(sc->sc_ah, ah->btcoex_hw.btpriority_gpio))
1385 btcoex->bt_priority_cnt++;
1386
1387 if (time_after(jiffies, btcoex->bt_priority_time +
1388 msecs_to_jiffies(ATH_BT_PRIORITY_TIME_THRESHOLD))) {
1389 if (btcoex->bt_priority_cnt >= ATH_BT_CNT_THRESHOLD) {
1390 ath_print(ath9k_hw_common(sc->sc_ah), ATH_DBG_BTCOEX,
1391 "BT priority traffic detected");
1392 sc->sc_flags |= SC_OP_BT_PRIORITY_DETECTED;
1393 } else {
1394 sc->sc_flags &= ~SC_OP_BT_PRIORITY_DETECTED;
1395 }
1396
1397 btcoex->bt_priority_cnt = 0;
1398 btcoex->bt_priority_time = jiffies;
1399 }
1400}
1401
1402/*
1403 * Configures appropriate weight based on stomp type.
1404 */
1405static void ath9k_btcoex_bt_stomp(struct ath_softc *sc,
1406 enum ath_stomp_type stomp_type)
1407{
1408 struct ath_hw *ah = sc->sc_ah;
1409
1410 switch (stomp_type) {
1411 case ATH_BTCOEX_STOMP_ALL:
1412 ath9k_hw_btcoex_set_weight(ah, AR_BT_COEX_WGHT,
1413 AR_STOMP_ALL_WLAN_WGHT);
1414 break;
1415 case ATH_BTCOEX_STOMP_LOW:
1416 ath9k_hw_btcoex_set_weight(ah, AR_BT_COEX_WGHT,
1417 AR_STOMP_LOW_WLAN_WGHT);
1418 break;
1419 case ATH_BTCOEX_STOMP_NONE:
1420 ath9k_hw_btcoex_set_weight(ah, AR_BT_COEX_WGHT,
1421 AR_STOMP_NONE_WLAN_WGHT);
1422 break;
1423 default:
1424 ath_print(ath9k_hw_common(ah), ATH_DBG_BTCOEX,
1425 "Invalid Stomptype\n");
1426 break;
1427 }
1428
1429 ath9k_hw_btcoex_enable(ah);
1430}
1431
1432static void ath9k_gen_timer_start(struct ath_hw *ah,
1433 struct ath_gen_timer *timer,
1434 u32 timer_next,
1435 u32 timer_period)
1436{
1437 struct ath_common *common = ath9k_hw_common(ah);
1438 struct ath_softc *sc = (struct ath_softc *) common->priv;
1439
1440 ath9k_hw_gen_timer_start(ah, timer, timer_next, timer_period);
1441
1442 if ((sc->imask & ATH9K_INT_GENTIMER) == 0) {
1443 ath9k_hw_set_interrupts(ah, 0);
1444 sc->imask |= ATH9K_INT_GENTIMER;
1445 ath9k_hw_set_interrupts(ah, sc->imask);
1446 }
1447}
1448
1449static void ath9k_gen_timer_stop(struct ath_hw *ah, struct ath_gen_timer *timer)
1450{
1451 struct ath_common *common = ath9k_hw_common(ah);
1452 struct ath_softc *sc = (struct ath_softc *) common->priv;
1453 struct ath_gen_timer_table *timer_table = &ah->hw_gen_timers;
1454
1455 ath9k_hw_gen_timer_stop(ah, timer);
1456
1457 /* if no timer is enabled, turn off interrupt mask */
1458 if (timer_table->timer_mask.val == 0) {
1459 ath9k_hw_set_interrupts(ah, 0);
1460 sc->imask &= ~ATH9K_INT_GENTIMER;
1461 ath9k_hw_set_interrupts(ah, sc->imask);
1462 }
1463}
1464
1465/*
1466 * This is the master bt coex timer which runs every
1467 * 45 ms; bt traffic is given priority during 55% of this
1468 * period while wlan gets the remaining 45%
1469 */
1470static void ath_btcoex_period_timer(unsigned long data)
1471{
1472 struct ath_softc *sc = (struct ath_softc *) data;
1473 struct ath_hw *ah = sc->sc_ah;
1474 struct ath_btcoex *btcoex = &sc->btcoex;
1475
1476 ath_detect_bt_priority(sc);
1477
1478 spin_lock_bh(&btcoex->btcoex_lock);
1479
1480 ath9k_btcoex_bt_stomp(sc, btcoex->bt_stomp_type);
1481
1482 spin_unlock_bh(&btcoex->btcoex_lock);
1483
1484 if (btcoex->btcoex_period != btcoex->btcoex_no_stomp) {
1485 if (btcoex->hw_timer_enabled)
1486 ath9k_gen_timer_stop(ah, btcoex->no_stomp_timer);
1487
1488 ath9k_gen_timer_start(ah,
1489 btcoex->no_stomp_timer,
1490 (ath9k_hw_gettsf32(ah) +
1491 btcoex->btcoex_no_stomp),
1492 btcoex->btcoex_no_stomp * 10);
1493 btcoex->hw_timer_enabled = true;
1494 }
1495
1496 mod_timer(&btcoex->period_timer, jiffies +
1497 msecs_to_jiffies(ATH_BTCOEX_DEF_BT_PERIOD));
1498}
1499
1500/*
1501 * Generic tsf based hw timer which configures weight
1502 * registers to time slice between wlan and bt traffic
1503 */
1504static void ath_btcoex_no_stomp_timer(void *arg)
1505{
1506 struct ath_softc *sc = (struct ath_softc *)arg;
1507 struct ath_hw *ah = sc->sc_ah;
1508 struct ath_btcoex *btcoex = &sc->btcoex;
1509
1510 ath_print(ath9k_hw_common(ah), ATH_DBG_BTCOEX,
1511 "no stomp timer running \n");
1512
1513 spin_lock_bh(&btcoex->btcoex_lock);
1514
1515 if (btcoex->bt_stomp_type == ATH_BTCOEX_STOMP_LOW)
1516 ath9k_btcoex_bt_stomp(sc, ATH_BTCOEX_STOMP_NONE);
1517 else if (btcoex->bt_stomp_type == ATH_BTCOEX_STOMP_ALL)
1518 ath9k_btcoex_bt_stomp(sc, ATH_BTCOEX_STOMP_LOW);
1519
1520 spin_unlock_bh(&btcoex->btcoex_lock);
1521}
1522
1523static int ath_init_btcoex_timer(struct ath_softc *sc)
1524{
1525 struct ath_btcoex *btcoex = &sc->btcoex;
1526
1527 btcoex->btcoex_period = ATH_BTCOEX_DEF_BT_PERIOD * 1000;
1528 btcoex->btcoex_no_stomp = (100 - ATH_BTCOEX_DEF_DUTY_CYCLE) *
1529 btcoex->btcoex_period / 100;
1530
1531 setup_timer(&btcoex->period_timer, ath_btcoex_period_timer,
1532 (unsigned long) sc);
1533
1534 spin_lock_init(&btcoex->btcoex_lock);
1535
1536 btcoex->no_stomp_timer = ath_gen_timer_alloc(sc->sc_ah,
1537 ath_btcoex_no_stomp_timer,
1538 ath_btcoex_no_stomp_timer,
1539 (void *) sc, AR_FIRST_NDP_TIMER);
1540
1541 if (!btcoex->no_stomp_timer)
1542 return -ENOMEM;
1543
1544 return 0;
1545}
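/*
 * Worked example (illustrative, assuming the defaults described above:
 * a 45 ms period and a 55% bt duty cycle): btcoex_period = 45 * 1000 =
 * 45000 us and btcoex_no_stomp = (100 - 55) * 45000 / 100 = 20250 us,
 * i.e. the no-stomp window covers the 45% of each period left to wlan.
 */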
1546
1547/*
1548 * Read and write share the same lock. We do this to serialize
1549 * reads and writes on Atheros 802.11n PCI devices only. This is required
1550 * as the FIFO on these devices can only sanely accept 2 requests. After
1551 * that the device goes bananas. Serializing the reads/writes prevents this
1552 * from happening.
1553 */
1554
1555static void ath9k_iowrite32(void *hw_priv, u32 val, u32 reg_offset)
1556{
1557 struct ath_hw *ah = (struct ath_hw *) hw_priv;
1558 struct ath_common *common = ath9k_hw_common(ah);
1559 struct ath_softc *sc = (struct ath_softc *) common->priv;
1560
1561 if (ah->config.serialize_regmode == SER_REG_MODE_ON) {
1562 unsigned long flags;
1563 spin_lock_irqsave(&sc->sc_serial_rw, flags);
1564 iowrite32(val, sc->mem + reg_offset);
1565 spin_unlock_irqrestore(&sc->sc_serial_rw, flags);
1566 } else
1567 iowrite32(val, sc->mem + reg_offset);
1568}
1569
1570static unsigned int ath9k_ioread32(void *hw_priv, u32 reg_offset)
1571{
1572 struct ath_hw *ah = (struct ath_hw *) hw_priv;
1573 struct ath_common *common = ath9k_hw_common(ah);
1574 struct ath_softc *sc = (struct ath_softc *) common->priv;
1575 u32 val;
1576
1577 if (ah->config.serialize_regmode == SER_REG_MODE_ON) {
1578 unsigned long flags;
1579 spin_lock_irqsave(&sc->sc_serial_rw, flags);
1580 val = ioread32(sc->mem + reg_offset);
1581 spin_unlock_irqrestore(&sc->sc_serial_rw, flags);
1582 } else
1583 val = ioread32(sc->mem + reg_offset);
1584 return val;
1585}
1586
1587static const struct ath_ops ath9k_common_ops = {
1588 .read = ath9k_ioread32,
1589 .write = ath9k_iowrite32,
1590};
1591
1592/*
1593 * Initialize and fill ath_softc; ath_softc is the
1594 * "Software Carrier" struct. Historically it has existed
1595 * to allow the separation between hardware specific
1596 * variables (now in ath_hw) and driver specific variables.
1597 */
1598static int ath_init_softc(u16 devid, struct ath_softc *sc, u16 subsysid,
1599 const struct ath_bus_ops *bus_ops)
1600{
1601 struct ath_hw *ah = NULL;
1602 struct ath_common *common;
1603 int r = 0, i;
1604 int csz = 0;
1605 int qnum;
1606
1607 /* XXX: hardware will not be ready until ath_open() is called */
1608 sc->sc_flags |= SC_OP_INVALID;
1609
1610 spin_lock_init(&sc->wiphy_lock);
1611 spin_lock_init(&sc->sc_resetlock);
1612 spin_lock_init(&sc->sc_serial_rw);
1613 spin_lock_init(&sc->ani_lock);
1614 spin_lock_init(&sc->sc_pm_lock);
1615 mutex_init(&sc->mutex);
1616 tasklet_init(&sc->intr_tq, ath9k_tasklet, (unsigned long)sc);
1617 tasklet_init(&sc->bcon_tasklet, ath_beacon_tasklet,
1618 (unsigned long)sc);
1619
1620 ah = kzalloc(sizeof(struct ath_hw), GFP_KERNEL);
1621 if (!ah)
1622 return -ENOMEM;
1623
1624 ah->hw_version.devid = devid;
1625 ah->hw_version.subsysid = subsysid;
1626 sc->sc_ah = ah;
1627
1628 common = ath9k_hw_common(ah);
1629 common->ops = &ath9k_common_ops;
1630 common->bus_ops = bus_ops;
1631 common->ah = ah;
1632 common->hw = sc->hw;
1633 common->priv = sc;
1634 common->debug_mask = ath9k_debug;
1635
1636 /*
1637 * Cache line size is used to size and align various
1638 * structures used to communicate with the hardware.
1639 */
1640 ath_read_cachesize(common, &csz);
1641 /* XXX assert csz is non-zero */
1642 common->cachelsz = csz << 2; /* convert to bytes */
1643
1644 r = ath9k_hw_init(ah);
1645 if (r) {
1646 ath_print(common, ATH_DBG_FATAL,
1647 "Unable to initialize hardware; "
1648 "initialization status: %d\n", r);
1649 goto bad_free_hw;
1650 }
1651
1652 if (ath9k_init_debug(ah) < 0) {
1653 ath_print(common, ATH_DBG_FATAL,
1654 "Unable to create debugfs files\n");
1655 goto bad_free_hw;
1656 }
1657
1658 /* Get the hardware key cache size. */
1659 common->keymax = ah->caps.keycache_size;
1660 if (common->keymax > ATH_KEYMAX) {
1661 ath_print(common, ATH_DBG_ANY,
1662 "Warning, using only %u entries in %u key cache\n",
1663 ATH_KEYMAX, common->keymax);
1664 common->keymax = ATH_KEYMAX;
1665 }
1666
1667 /*
1668 * Reset the key cache since some parts do not
1669 * reset the contents on initial power up.
1670 */
1671 for (i = 0; i < common->keymax; i++)
1672 ath9k_hw_keyreset(ah, (u16) i);
1673
1674 /* default to MONITOR mode */
1675 sc->sc_ah->opmode = NL80211_IFTYPE_MONITOR;
1676
1677 /*
1678 * Allocate hardware transmit queues: one queue for
1679 * beacon frames and one data queue for each QoS
1680 * priority. Note that the hal handles resetting
1681 * these queues at the needed time.
1682 */
1683 sc->beacon.beaconq = ath9k_hw_beaconq_setup(ah);
1684 if (sc->beacon.beaconq == -1) {
1685 ath_print(common, ATH_DBG_FATAL,
1686 "Unable to setup a beacon xmit queue\n");
1687 r = -EIO;
1688 goto bad2;
1689 }
1690 sc->beacon.cabq = ath_txq_setup(sc, ATH9K_TX_QUEUE_CAB, 0);
1691 if (sc->beacon.cabq == NULL) {
1692 ath_print(common, ATH_DBG_FATAL,
1693 "Unable to setup CAB xmit queue\n");
1694 r = -EIO;
1695 goto bad2;
1696 }
1697
1698 sc->config.cabqReadytime = ATH_CABQ_READY_TIME;
1699 ath_cabq_update(sc);
1700
1701 for (i = 0; i < ARRAY_SIZE(sc->tx.hwq_map); i++)
1702 sc->tx.hwq_map[i] = -1;
1703
1704 /* Setup data queues */
1705 /* NB: ensure BK queue is the lowest priority h/w queue */
1706 if (!ath_tx_setup(sc, ATH9K_WME_AC_BK)) {
1707 ath_print(common, ATH_DBG_FATAL,
1708 "Unable to setup xmit queue for BK traffic\n");
1709 r = -EIO;
1710 goto bad2;
1711 }
1712
1713 if (!ath_tx_setup(sc, ATH9K_WME_AC_BE)) {
1714 ath_print(common, ATH_DBG_FATAL,
1715 "Unable to setup xmit queue for BE traffic\n");
1716 r = -EIO;
1717 goto bad2;
1718 }
1719 if (!ath_tx_setup(sc, ATH9K_WME_AC_VI)) {
1720 ath_print(common, ATH_DBG_FATAL,
1721 "Unable to setup xmit queue for VI traffic\n");
1722 r = -EIO;
1723 goto bad2;
1724 }
1725 if (!ath_tx_setup(sc, ATH9K_WME_AC_VO)) {
1726 ath_print(common, ATH_DBG_FATAL,
1727 "Unable to setup xmit queue for VO traffic\n");
1728 r = -EIO;
1729 goto bad2;
1730 }
1731
1732 /* Initializes the noise floor to a reasonable default value.
1733 * Later on this will be updated during ANI processing. */
1734
1735 common->ani.noise_floor = ATH_DEFAULT_NOISE_FLOOR;
1736 setup_timer(&common->ani.timer, ath_ani_calibrate, (unsigned long)sc);
1737
1738 if (ath9k_hw_getcapability(ah, ATH9K_CAP_CIPHER,
1739 ATH9K_CIPHER_TKIP, NULL)) {
1740 /*
1741 * Whether we should enable h/w TKIP MIC.
1742 * XXX: if we don't support WME TKIP MIC, then we wouldn't
1743 * report WMM capable, so it's always safe to turn on
1744 * TKIP MIC in this case.
1745 */
1746 ath9k_hw_setcapability(sc->sc_ah, ATH9K_CAP_TKIP_MIC,
1747 0, 1, NULL);
1748 }
1749
1750 /*
1751 * Check whether the separate key cache entries
1752 * are required to handle both tx+rx MIC keys.
1753 * With split mic keys the number of stations is limited
1754 * to 27 otherwise 59.
1755 */
1756 if (ath9k_hw_getcapability(ah, ATH9K_CAP_CIPHER,
1757 ATH9K_CIPHER_TKIP, NULL)
1758 && ath9k_hw_getcapability(ah, ATH9K_CAP_CIPHER,
1759 ATH9K_CIPHER_MIC, NULL)
1760 && ath9k_hw_getcapability(ah, ATH9K_CAP_TKIP_SPLIT,
1761 0, NULL))
1762 common->splitmic = 1;
1763
1764 /* turn on mcast key search if possible */
1765 if (!ath9k_hw_getcapability(ah, ATH9K_CAP_MCAST_KEYSRCH, 0, NULL))
1766 (void)ath9k_hw_setcapability(ah, ATH9K_CAP_MCAST_KEYSRCH, 1,
1767 1, NULL);
1768
1769 sc->config.txpowlimit = ATH_TXPOWER_MAX;
1770
1771 /* 11n Capabilities */
1772 if (ah->caps.hw_caps & ATH9K_HW_CAP_HT) {
1773 sc->sc_flags |= SC_OP_TXAGGR;
1774 sc->sc_flags |= SC_OP_RXAGGR;
1775 }
1776
1777 common->tx_chainmask = ah->caps.tx_chainmask;
1778 common->rx_chainmask = ah->caps.rx_chainmask;
1779
1780 ath9k_hw_setcapability(ah, ATH9K_CAP_DIVERSITY, 1, true, NULL);
1781 sc->rx.defant = ath9k_hw_getdefantenna(ah);
1782
1783 if (ah->caps.hw_caps & ATH9K_HW_CAP_BSSIDMASK)
1784 memcpy(common->bssidmask, ath_bcast_mac, ETH_ALEN);
1785
1786 sc->beacon.slottime = ATH9K_SLOT_TIME_9; /* default to short slot time */
1787
1788 /* initialize beacon slots */
1789 for (i = 0; i < ARRAY_SIZE(sc->beacon.bslot); i++) {
1790 sc->beacon.bslot[i] = NULL;
1791 sc->beacon.bslot_aphy[i] = NULL;
1792 }
1793
1794 /* setup channels and rates */
1795
1796 if (test_bit(ATH9K_MODE_11G, sc->sc_ah->caps.wireless_modes)) {
1797 sc->sbands[IEEE80211_BAND_2GHZ].channels = ath9k_2ghz_chantable;
1798 sc->sbands[IEEE80211_BAND_2GHZ].band = IEEE80211_BAND_2GHZ;
1799 sc->sbands[IEEE80211_BAND_2GHZ].n_channels =
1800 ARRAY_SIZE(ath9k_2ghz_chantable);
1801 sc->sbands[IEEE80211_BAND_2GHZ].bitrates = ath9k_legacy_rates;
1802 sc->sbands[IEEE80211_BAND_2GHZ].n_bitrates =
1803 ARRAY_SIZE(ath9k_legacy_rates);
1804 }
1805
1806 if (test_bit(ATH9K_MODE_11A, sc->sc_ah->caps.wireless_modes)) {
1807 sc->sbands[IEEE80211_BAND_5GHZ].channels = ath9k_5ghz_chantable;
1808 sc->sbands[IEEE80211_BAND_5GHZ].band = IEEE80211_BAND_5GHZ;
1809 sc->sbands[IEEE80211_BAND_5GHZ].n_channels =
1810 ARRAY_SIZE(ath9k_5ghz_chantable);
1811 sc->sbands[IEEE80211_BAND_5GHZ].bitrates =
1812 ath9k_legacy_rates + 4;
1813 sc->sbands[IEEE80211_BAND_5GHZ].n_bitrates =
1814 ARRAY_SIZE(ath9k_legacy_rates) - 4;
1815 }
1816
1817 switch (ah->btcoex_hw.scheme) {
1818 case ATH_BTCOEX_CFG_NONE:
1819 break;
1820 case ATH_BTCOEX_CFG_2WIRE:
1821 ath9k_hw_btcoex_init_2wire(ah);
1822 break;
1823 case ATH_BTCOEX_CFG_3WIRE:
1824 ath9k_hw_btcoex_init_3wire(ah);
1825 r = ath_init_btcoex_timer(sc);
1826 if (r)
1827 goto bad2;
1828 qnum = ath_tx_get_qnum(sc, ATH9K_TX_QUEUE_DATA, ATH9K_WME_AC_BE);
1829 ath9k_hw_init_btcoex_hw(ah, qnum);
1830 sc->btcoex.bt_stomp_type = ATH_BTCOEX_STOMP_LOW;
1831 break;
1832 default:
1833 WARN_ON(1);
1834 break;
1835 }
1836
1837 return 0;
1838bad2:
1839 /* cleanup tx queues */
1840 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
1841 if (ATH_TXQ_SETUP(sc, i))
1842 ath_tx_cleanupq(sc, &sc->tx.txq[i]);
1843
1844bad_free_hw:
1845 ath9k_uninit_hw(sc);
1846 return r;
1847}
1848
1849void ath_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw)
1850{
1851 hw->flags = IEEE80211_HW_RX_INCLUDES_FCS |
1852 IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING |
1853 IEEE80211_HW_SIGNAL_DBM |
1854 IEEE80211_HW_AMPDU_AGGREGATION |
1855 IEEE80211_HW_SUPPORTS_PS |
1856 IEEE80211_HW_PS_NULLFUNC_STACK |
1857 IEEE80211_HW_SPECTRUM_MGMT;
1858
1859 if (AR_SREV_9160_10_OR_LATER(sc->sc_ah) || modparam_nohwcrypt)
1860 hw->flags |= IEEE80211_HW_MFP_CAPABLE;
1861
1862 hw->wiphy->interface_modes =
1863 BIT(NL80211_IFTYPE_AP) |
1864 BIT(NL80211_IFTYPE_STATION) |
1865 BIT(NL80211_IFTYPE_ADHOC) |
1866 BIT(NL80211_IFTYPE_MESH_POINT);
1867
1868 hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
1869
1870 hw->queues = 4;
1871 hw->max_rates = 4;
1872 hw->channel_change_time = 5000;
1873 hw->max_listen_interval = 10;
1874 /* Hardware supports 10 but we use 4 */
1875 hw->max_rate_tries = 4;
1876 hw->sta_data_size = sizeof(struct ath_node);
1877 hw->vif_data_size = sizeof(struct ath_vif);
1878
1879 hw->rate_control_algorithm = "ath9k_rate_control";
1880
1881 if (test_bit(ATH9K_MODE_11G, sc->sc_ah->caps.wireless_modes))
1882 hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
1883 &sc->sbands[IEEE80211_BAND_2GHZ];
1884 if (test_bit(ATH9K_MODE_11A, sc->sc_ah->caps.wireless_modes))
1885 hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
1886 &sc->sbands[IEEE80211_BAND_5GHZ];
1887}
1888
1889/* Device driver core initialization */
1890int ath_init_device(u16 devid, struct ath_softc *sc, u16 subsysid,
1891 const struct ath_bus_ops *bus_ops)
1892{
1893 struct ieee80211_hw *hw = sc->hw;
1894 struct ath_common *common;
1895 struct ath_hw *ah;
1896 int error = 0, i;
1897 struct ath_regulatory *reg;
1898
1899 dev_dbg(sc->dev, "Attach ATH hw\n");
1900
1901 error = ath_init_softc(devid, sc, subsysid, bus_ops);
1902 if (error != 0)
1903 return error;
1904
1905 ah = sc->sc_ah;
1906 common = ath9k_hw_common(ah);
1907
1908 /* get mac address from hardware and set in mac80211 */
1909
1910 SET_IEEE80211_PERM_ADDR(hw, common->macaddr);
1911
1912 ath_set_hw_capab(sc, hw);
1913
1914 error = ath_regd_init(&common->regulatory, sc->hw->wiphy,
1915 ath9k_reg_notifier);
1916 if (error)
1917 return error;
1918
1919 reg = &common->regulatory;
1920
1921 if (ah->caps.hw_caps & ATH9K_HW_CAP_HT) {
1922 if (test_bit(ATH9K_MODE_11G, ah->caps.wireless_modes))
1923 setup_ht_cap(sc,
1924 &sc->sbands[IEEE80211_BAND_2GHZ].ht_cap);
1925 if (test_bit(ATH9K_MODE_11A, ah->caps.wireless_modes))
1926 setup_ht_cap(sc,
1927 &sc->sbands[IEEE80211_BAND_5GHZ].ht_cap);
1928 }
1929
1930 /* initialize tx/rx engine */
1931 error = ath_tx_init(sc, ATH_TXBUF);
1932 if (error != 0)
1933 goto error_attach;
1934
1935 error = ath_rx_init(sc, ATH_RXBUF);
1936 if (error != 0)
1937 goto error_attach;
1938
1939 INIT_WORK(&sc->chan_work, ath9k_wiphy_chan_work);
1940 INIT_DELAYED_WORK(&sc->wiphy_work, ath9k_wiphy_work);
1941 sc->wiphy_scheduler_int = msecs_to_jiffies(500);
1942
1943 error = ieee80211_register_hw(hw);
1944
1945 if (!ath_is_world_regd(reg)) {
1946 error = regulatory_hint(hw->wiphy, reg->alpha2);
1947 if (error)
1948 goto error_attach;
1949 }
1950
1951 /* Initialize LED control */
1952 ath_init_leds(sc);
1953
1954 ath_start_rfkill_poll(sc);
1955
1956 return 0;
1957
1958error_attach:
1959 /* cleanup tx queues */
1960 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
1961 if (ATH_TXQ_SETUP(sc, i))
1962 ath_tx_cleanupq(sc, &sc->tx.txq[i]);
1963
1964 ath9k_uninit_hw(sc);
1965
1966 return error;
1967}
1968
1969int ath_reset(struct ath_softc *sc, bool retry_tx) 937int ath_reset(struct ath_softc *sc, bool retry_tx)
1970{ 938{
1971 struct ath_hw *ah = sc->sc_ah; 939 struct ath_hw *ah = sc->sc_ah;
@@ -1976,6 +944,8 @@ int ath_reset(struct ath_softc *sc, bool retry_tx)
1976 /* Stop ANI */ 944 /* Stop ANI */
1977 del_timer_sync(&common->ani.timer); 945 del_timer_sync(&common->ani.timer);
1978 946
947 ieee80211_stop_queues(hw);
948
1979 ath9k_hw_set_interrupts(ah, 0); 949 ath9k_hw_set_interrupts(ah, 0);
1980 ath_drain_all_txq(sc, retry_tx); 950 ath_drain_all_txq(sc, retry_tx);
1981 ath_stoprecv(sc); 951 ath_stoprecv(sc);
@@ -2017,131 +987,14 @@ int ath_reset(struct ath_softc *sc, bool retry_tx)
2017 } 987 }
2018 } 988 }
2019 989
990 ieee80211_wake_queues(hw);
991
2020 /* Start ANI */ 992 /* Start ANI */
2021 ath_start_ani(common); 993 ath_start_ani(common);
2022 994
2023 return r; 995 return r;
2024} 996}
2025 997
2026/*
2027 * This function will allocate both the DMA descriptor structure, and the
2028 * buffers it contains. These are used to contain the descriptors used
2029 * by the system.
2030*/
2031int ath_descdma_setup(struct ath_softc *sc, struct ath_descdma *dd,
2032 struct list_head *head, const char *name,
2033 int nbuf, int ndesc)
2034{
2035#define DS2PHYS(_dd, _ds) \
2036 ((_dd)->dd_desc_paddr + ((caddr_t)(_ds) - (caddr_t)(_dd)->dd_desc))
2037#define ATH_DESC_4KB_BOUND_CHECK(_daddr) ((((_daddr) & 0xFFF) > 0xF7F) ? 1 : 0)
2038#define ATH_DESC_4KB_BOUND_NUM_SKIPPED(_len) ((_len) / 4096)
2039 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
2040 struct ath_desc *ds;
2041 struct ath_buf *bf;
2042 int i, bsize, error;
2043
2044 ath_print(common, ATH_DBG_CONFIG, "%s DMA: %u buffers %u desc/buf\n",
2045 name, nbuf, ndesc);
2046
2047 INIT_LIST_HEAD(head);
2048 /* ath_desc must be a multiple of DWORDs */
2049 if ((sizeof(struct ath_desc) % 4) != 0) {
2050 ath_print(common, ATH_DBG_FATAL,
2051 "ath_desc not DWORD aligned\n");
2052 BUG_ON((sizeof(struct ath_desc) % 4) != 0);
2053 error = -ENOMEM;
2054 goto fail;
2055 }
2056
2057 dd->dd_desc_len = sizeof(struct ath_desc) * nbuf * ndesc;
2058
2059 /*
2060 * Need additional DMA memory because we can't use
2061 * descriptors that cross the 4K page boundary. Assume
2062 * one skipped descriptor per 4K page.
2063 */
2064 if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_4KB_SPLITTRANS)) {
2065 u32 ndesc_skipped =
2066 ATH_DESC_4KB_BOUND_NUM_SKIPPED(dd->dd_desc_len);
2067 u32 dma_len;
2068
2069 while (ndesc_skipped) {
2070 dma_len = ndesc_skipped * sizeof(struct ath_desc);
2071 dd->dd_desc_len += dma_len;
2072
2073 ndesc_skipped = ATH_DESC_4KB_BOUND_NUM_SKIPPED(dma_len);
2074 };
2075 }
2076
2077 /* allocate descriptors */
2078 dd->dd_desc = dma_alloc_coherent(sc->dev, dd->dd_desc_len,
2079 &dd->dd_desc_paddr, GFP_KERNEL);
2080 if (dd->dd_desc == NULL) {
2081 error = -ENOMEM;
2082 goto fail;
2083 }
2084 ds = dd->dd_desc;
2085 ath_print(common, ATH_DBG_CONFIG, "%s DMA map: %p (%u) -> %llx (%u)\n",
2086 name, ds, (u32) dd->dd_desc_len,
2087 ito64(dd->dd_desc_paddr), /*XXX*/(u32) dd->dd_desc_len);
2088
2089 /* allocate buffers */
2090 bsize = sizeof(struct ath_buf) * nbuf;
2091 bf = kzalloc(bsize, GFP_KERNEL);
2092 if (bf == NULL) {
2093 error = -ENOMEM;
2094 goto fail2;
2095 }
2096 dd->dd_bufptr = bf;
2097
2098 for (i = 0; i < nbuf; i++, bf++, ds += ndesc) {
2099 bf->bf_desc = ds;
2100 bf->bf_daddr = DS2PHYS(dd, ds);
2101
2102 if (!(sc->sc_ah->caps.hw_caps &
2103 ATH9K_HW_CAP_4KB_SPLITTRANS)) {
2104 /*
2105 * Skip descriptor addresses which can cause 4KB
2106 * boundary crossing (addr + length) with a 32 dword
2107 * descriptor fetch.
2108 */
2109 while (ATH_DESC_4KB_BOUND_CHECK(bf->bf_daddr)) {
2110 BUG_ON((caddr_t) bf->bf_desc >=
2111 ((caddr_t) dd->dd_desc +
2112 dd->dd_desc_len));
2113
2114 ds += ndesc;
2115 bf->bf_desc = ds;
2116 bf->bf_daddr = DS2PHYS(dd, ds);
2117 }
2118 }
2119 list_add_tail(&bf->list, head);
2120 }
2121 return 0;
2122fail2:
2123 dma_free_coherent(sc->dev, dd->dd_desc_len, dd->dd_desc,
2124 dd->dd_desc_paddr);
2125fail:
2126 memset(dd, 0, sizeof(*dd));
2127 return error;
2128#undef ATH_DESC_4KB_BOUND_CHECK
2129#undef ATH_DESC_4KB_BOUND_NUM_SKIPPED
2130#undef DS2PHYS
2131}
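/*
 * Worked example (illustrative): a descriptor fetch is 32 dwords (128
 * bytes), so ATH_DESC_4KB_BOUND_CHECK flags any address whose offset
 * within its 4 KB page is above 0xF7F, i.e. the last 128 bytes of the
 * page (0xF80-0xFFF), where the fetch would cross into the next page.
 * ATH_DESC_4KB_BOUND_NUM_SKIPPED then budgets one skipped descriptor per
 * 4 KB of descriptor memory, which is why dd_desc_len is grown in the
 * sizing loop before dma_alloc_coherent() is called.
 */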
2132
2133void ath_descdma_cleanup(struct ath_softc *sc,
2134 struct ath_descdma *dd,
2135 struct list_head *head)
2136{
2137 dma_free_coherent(sc->dev, dd->dd_desc_len, dd->dd_desc,
2138 dd->dd_desc_paddr);
2139
2140 INIT_LIST_HEAD(head);
2141 kfree(dd->dd_bufptr);
2142 memset(dd, 0, sizeof(*dd));
2143}
2144
2145int ath_get_hal_qnum(u16 queue, struct ath_softc *sc) 998int ath_get_hal_qnum(u16 queue, struct ath_softc *sc)
2146{ 999{
2147 int qnum; 1000 int qnum;
@@ -2220,28 +1073,6 @@ void ath9k_update_ichannel(struct ath_softc *sc, struct ieee80211_hw *hw,
2220/* mac80211 callbacks */ 1073/* mac80211 callbacks */
2221/**********************/ 1074/**********************/
2222 1075
2223/*
2224 * (Re)start btcoex timers
2225 */
2226static void ath9k_btcoex_timer_resume(struct ath_softc *sc)
2227{
2228 struct ath_btcoex *btcoex = &sc->btcoex;
2229 struct ath_hw *ah = sc->sc_ah;
2230
2231 ath_print(ath9k_hw_common(ah), ATH_DBG_BTCOEX,
2232 "Starting btcoex timers");
2233
2234 /* make sure duty cycle timer is also stopped when resuming */
2235 if (btcoex->hw_timer_enabled)
2236 ath9k_gen_timer_stop(sc->sc_ah, btcoex->no_stomp_timer);
2237
2238 btcoex->bt_priority_cnt = 0;
2239 btcoex->bt_priority_time = jiffies;
2240 sc->sc_flags &= ~SC_OP_BT_PRIORITY_DETECTED;
2241
2242 mod_timer(&btcoex->period_timer, jiffies);
2243}
2244
2245static int ath9k_start(struct ieee80211_hw *hw) 1076static int ath9k_start(struct ieee80211_hw *hw)
2246{ 1077{
2247 struct ath_wiphy *aphy = hw->priv; 1078 struct ath_wiphy *aphy = hw->priv;
@@ -2411,11 +1242,11 @@ static int ath9k_tx(struct ieee80211_hw *hw,
2411 if (ieee80211_is_pspoll(hdr->frame_control)) { 1242 if (ieee80211_is_pspoll(hdr->frame_control)) {
2412 ath_print(common, ATH_DBG_PS, 1243 ath_print(common, ATH_DBG_PS,
2413 "Sending PS-Poll to pick a buffered frame\n"); 1244 "Sending PS-Poll to pick a buffered frame\n");
2414 sc->sc_flags |= SC_OP_WAIT_FOR_PSPOLL_DATA; 1245 sc->ps_flags |= PS_WAIT_FOR_PSPOLL_DATA;
2415 } else { 1246 } else {
2416 ath_print(common, ATH_DBG_PS, 1247 ath_print(common, ATH_DBG_PS,
2417 "Wake up to complete TX\n"); 1248 "Wake up to complete TX\n");
2418 sc->sc_flags |= SC_OP_WAIT_FOR_TX_ACK; 1249 sc->ps_flags |= PS_WAIT_FOR_TX_ACK;
2419 } 1250 }
2420 /* 1251 /*
2421 * The actual restore operation will happen only after 1252 * The actual restore operation will happen only after
@@ -2468,22 +1299,6 @@ exit:
2468 return 0; 1299 return 0;
2469} 1300}
2470 1301
2471/*
2472 * Pause btcoex timer and bt duty cycle timer
2473 */
2474static void ath9k_btcoex_timer_pause(struct ath_softc *sc)
2475{
2476 struct ath_btcoex *btcoex = &sc->btcoex;
2477 struct ath_hw *ah = sc->sc_ah;
2478
2479 del_timer_sync(&btcoex->period_timer);
2480
2481 if (btcoex->hw_timer_enabled)
2482 ath9k_gen_timer_stop(ah, btcoex->no_stomp_timer);
2483
2484 btcoex->hw_timer_enabled = false;
2485}
2486
2487static void ath9k_stop(struct ieee80211_hw *hw) 1302static void ath9k_stop(struct ieee80211_hw *hw)
2488{ 1303{
2489 struct ath_wiphy *aphy = hw->priv; 1304 struct ath_wiphy *aphy = hw->priv;
@@ -2550,12 +1365,12 @@ static void ath9k_stop(struct ieee80211_hw *hw)
2550} 1365}
2551 1366
2552static int ath9k_add_interface(struct ieee80211_hw *hw, 1367static int ath9k_add_interface(struct ieee80211_hw *hw,
2553 struct ieee80211_if_init_conf *conf) 1368 struct ieee80211_vif *vif)
2554{ 1369{
2555 struct ath_wiphy *aphy = hw->priv; 1370 struct ath_wiphy *aphy = hw->priv;
2556 struct ath_softc *sc = aphy->sc; 1371 struct ath_softc *sc = aphy->sc;
2557 struct ath_common *common = ath9k_hw_common(sc->sc_ah); 1372 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
2558 struct ath_vif *avp = (void *)conf->vif->drv_priv; 1373 struct ath_vif *avp = (void *)vif->drv_priv;
2559 enum nl80211_iftype ic_opmode = NL80211_IFTYPE_UNSPECIFIED; 1374 enum nl80211_iftype ic_opmode = NL80211_IFTYPE_UNSPECIFIED;
2560 int ret = 0; 1375 int ret = 0;
2561 1376
@@ -2567,7 +1382,7 @@ static int ath9k_add_interface(struct ieee80211_hw *hw,
2567 goto out; 1382 goto out;
2568 } 1383 }
2569 1384
2570 switch (conf->type) { 1385 switch (vif->type) {
2571 case NL80211_IFTYPE_STATION: 1386 case NL80211_IFTYPE_STATION:
2572 ic_opmode = NL80211_IFTYPE_STATION; 1387 ic_opmode = NL80211_IFTYPE_STATION;
2573 break; 1388 break;
@@ -2578,11 +1393,11 @@ static int ath9k_add_interface(struct ieee80211_hw *hw,
2578 ret = -ENOBUFS; 1393 ret = -ENOBUFS;
2579 goto out; 1394 goto out;
2580 } 1395 }
2581 ic_opmode = conf->type; 1396 ic_opmode = vif->type;
2582 break; 1397 break;
2583 default: 1398 default:
2584 ath_print(common, ATH_DBG_FATAL, 1399 ath_print(common, ATH_DBG_FATAL,
2585 "Interface type %d not yet supported\n", conf->type); 1400 "Interface type %d not yet supported\n", vif->type);
2586 ret = -EOPNOTSUPP; 1401 ret = -EOPNOTSUPP;
2587 goto out; 1402 goto out;
2588 } 1403 }
@@ -2614,18 +1429,18 @@ static int ath9k_add_interface(struct ieee80211_hw *hw,
2614 * Enable MIB interrupts when there are hardware phy counters. 1429 * Enable MIB interrupts when there are hardware phy counters.
2615 * Note we only do this (at the moment) for station mode. 1430 * Note we only do this (at the moment) for station mode.
2616 */ 1431 */
2617 if ((conf->type == NL80211_IFTYPE_STATION) || 1432 if ((vif->type == NL80211_IFTYPE_STATION) ||
2618 (conf->type == NL80211_IFTYPE_ADHOC) || 1433 (vif->type == NL80211_IFTYPE_ADHOC) ||
2619 (conf->type == NL80211_IFTYPE_MESH_POINT)) { 1434 (vif->type == NL80211_IFTYPE_MESH_POINT)) {
2620 sc->imask |= ATH9K_INT_MIB; 1435 sc->imask |= ATH9K_INT_MIB;
2621 sc->imask |= ATH9K_INT_TSFOOR; 1436 sc->imask |= ATH9K_INT_TSFOOR;
2622 } 1437 }
2623 1438
2624 ath9k_hw_set_interrupts(sc->sc_ah, sc->imask); 1439 ath9k_hw_set_interrupts(sc->sc_ah, sc->imask);
2625 1440
2626 if (conf->type == NL80211_IFTYPE_AP || 1441 if (vif->type == NL80211_IFTYPE_AP ||
2627 conf->type == NL80211_IFTYPE_ADHOC || 1442 vif->type == NL80211_IFTYPE_ADHOC ||
2628 conf->type == NL80211_IFTYPE_MONITOR) 1443 vif->type == NL80211_IFTYPE_MONITOR)
2629 ath_start_ani(common); 1444 ath_start_ani(common);
2630 1445
2631out: 1446out:
@@ -2634,12 +1449,12 @@ out:
2634} 1449}
2635 1450
2636static void ath9k_remove_interface(struct ieee80211_hw *hw, 1451static void ath9k_remove_interface(struct ieee80211_hw *hw,
2637 struct ieee80211_if_init_conf *conf) 1452 struct ieee80211_vif *vif)
2638{ 1453{
2639 struct ath_wiphy *aphy = hw->priv; 1454 struct ath_wiphy *aphy = hw->priv;
2640 struct ath_softc *sc = aphy->sc; 1455 struct ath_softc *sc = aphy->sc;
2641 struct ath_common *common = ath9k_hw_common(sc->sc_ah); 1456 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
2642 struct ath_vif *avp = (void *)conf->vif->drv_priv; 1457 struct ath_vif *avp = (void *)vif->drv_priv;
2643 int i; 1458 int i;
2644 1459
2645 ath_print(common, ATH_DBG_CONFIG, "Detach Interface\n"); 1460 ath_print(common, ATH_DBG_CONFIG, "Detach Interface\n");
@@ -2662,7 +1477,7 @@ static void ath9k_remove_interface(struct ieee80211_hw *hw,
2662 sc->sc_flags &= ~SC_OP_BEACONS; 1477 sc->sc_flags &= ~SC_OP_BEACONS;
2663 1478
2664 for (i = 0; i < ARRAY_SIZE(sc->beacon.bslot); i++) { 1479 for (i = 0; i < ARRAY_SIZE(sc->beacon.bslot); i++) {
2665 if (sc->beacon.bslot[i] == conf->vif) { 1480 if (sc->beacon.bslot[i] == vif) {
2666 printk(KERN_DEBUG "%s: vif had allocated beacon " 1481 printk(KERN_DEBUG "%s: vif had allocated beacon "
2667 "slot\n", __func__); 1482 "slot\n", __func__);
2668 sc->beacon.bslot[i] = NULL; 1483 sc->beacon.bslot[i] = NULL;
@@ -2727,7 +1542,7 @@ static int ath9k_config(struct ieee80211_hw *hw, u32 changed)
2727 */ 1542 */
2728 if (changed & IEEE80211_CONF_CHANGE_PS) { 1543 if (changed & IEEE80211_CONF_CHANGE_PS) {
2729 if (conf->flags & IEEE80211_CONF_PS) { 1544 if (conf->flags & IEEE80211_CONF_PS) {
2730 sc->sc_flags |= SC_OP_PS_ENABLED; 1545 sc->ps_flags |= PS_ENABLED;
2731 if (!(ah->caps.hw_caps & 1546 if (!(ah->caps.hw_caps &
2732 ATH9K_HW_CAP_AUTOSLEEP)) { 1547 ATH9K_HW_CAP_AUTOSLEEP)) {
2733 if ((sc->imask & ATH9K_INT_TIM_TIMER) == 0) { 1548 if ((sc->imask & ATH9K_INT_TIM_TIMER) == 0) {
@@ -2740,23 +1555,23 @@ static int ath9k_config(struct ieee80211_hw *hw, u32 changed)
2740 * At this point we know hardware has received an ACK 1555 * At this point we know hardware has received an ACK
2741 * of a previously sent null data frame. 1556 * of a previously sent null data frame.
2742 */ 1557 */
2743 if ((sc->sc_flags & SC_OP_NULLFUNC_COMPLETED)) { 1558 if ((sc->ps_flags & PS_NULLFUNC_COMPLETED)) {
2744 sc->sc_flags &= ~SC_OP_NULLFUNC_COMPLETED; 1559 sc->ps_flags &= ~PS_NULLFUNC_COMPLETED;
2745 sc->ps_enabled = true; 1560 sc->ps_enabled = true;
2746 ath9k_hw_setrxabort(sc->sc_ah, 1); 1561 ath9k_hw_setrxabort(sc->sc_ah, 1);
2747 } 1562 }
2748 } else { 1563 } else {
2749 sc->ps_enabled = false; 1564 sc->ps_enabled = false;
2750 sc->sc_flags &= ~(SC_OP_PS_ENABLED | 1565 sc->ps_flags &= ~(PS_ENABLED |
2751 SC_OP_NULLFUNC_COMPLETED); 1566 PS_NULLFUNC_COMPLETED);
2752 ath9k_setpower(sc, ATH9K_PM_AWAKE); 1567 ath9k_setpower(sc, ATH9K_PM_AWAKE);
2753 if (!(ah->caps.hw_caps & 1568 if (!(ah->caps.hw_caps &
2754 ATH9K_HW_CAP_AUTOSLEEP)) { 1569 ATH9K_HW_CAP_AUTOSLEEP)) {
2755 ath9k_hw_setrxabort(sc->sc_ah, 0); 1570 ath9k_hw_setrxabort(sc->sc_ah, 0);
2756 sc->sc_flags &= ~(SC_OP_WAIT_FOR_BEACON | 1571 sc->ps_flags &= ~(PS_WAIT_FOR_BEACON |
2757 SC_OP_WAIT_FOR_CAB | 1572 PS_WAIT_FOR_CAB |
2758 SC_OP_WAIT_FOR_PSPOLL_DATA | 1573 PS_WAIT_FOR_PSPOLL_DATA |
2759 SC_OP_WAIT_FOR_TX_ACK); 1574 PS_WAIT_FOR_TX_ACK);
2760 if (sc->imask & ATH9K_INT_TIM_TIMER) { 1575 if (sc->imask & ATH9K_INT_TIM_TIMER) {
2761 sc->imask &= ~ATH9K_INT_TIM_TIMER; 1576 sc->imask &= ~ATH9K_INT_TIM_TIMER;
2762 ath9k_hw_set_interrupts(sc->sc_ah, 1577 ath9k_hw_set_interrupts(sc->sc_ah,
@@ -2766,6 +1581,14 @@ static int ath9k_config(struct ieee80211_hw *hw, u32 changed)
2766 } 1581 }
2767 } 1582 }
2768 1583
1584 if (changed & IEEE80211_CONF_CHANGE_MONITOR) {
1585 if (conf->flags & IEEE80211_CONF_MONITOR) {
1586 ath_print(common, ATH_DBG_CONFIG,
1587 "HW opmode set to Monitor mode\n");
1588 sc->sc_ah->opmode = NL80211_IFTYPE_MONITOR;
1589 }
1590 }
1591
2769 if (changed & IEEE80211_CONF_CHANGE_CHANNEL) { 1592 if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
2770 struct ieee80211_channel *curchan = hw->conf.channel; 1593 struct ieee80211_channel *curchan = hw->conf.channel;
2771 int pos = curchan->hw_value; 1594 int pos = curchan->hw_value;
@@ -2966,6 +1789,7 @@ static void ath9k_bss_info_changed(struct ieee80211_hw *hw,
2966 struct ath_hw *ah = sc->sc_ah; 1789 struct ath_hw *ah = sc->sc_ah;
2967 struct ath_common *common = ath9k_hw_common(ah); 1790 struct ath_common *common = ath9k_hw_common(ah);
2968 struct ath_vif *avp = (void *)vif->drv_priv; 1791 struct ath_vif *avp = (void *)vif->drv_priv;
1792 int slottime;
2969 int error; 1793 int error;
2970 1794
2971 mutex_lock(&sc->mutex); 1795 mutex_lock(&sc->mutex);
@@ -3001,6 +1825,25 @@ static void ath9k_bss_info_changed(struct ieee80211_hw *hw,
3001 ath_beacon_config(sc, vif); 1825 ath_beacon_config(sc, vif);
3002 } 1826 }
3003 1827
1828 if (changed & BSS_CHANGED_ERP_SLOT) {
1829 if (bss_conf->use_short_slot)
1830 slottime = 9;
1831 else
1832 slottime = 20;
1833 if (vif->type == NL80211_IFTYPE_AP) {
1834 /*
1835 * Defer update, so that connected stations can adjust
1836 * their settings at the same time.
1837 * See beacon.c for more details
1838 */
1839 sc->beacon.slottime = slottime;
1840 sc->beacon.updateslot = UPDATE;
1841 } else {
1842 ah->slottime = slottime;
1843 ath9k_hw_init_global_settings(ah);
1844 }
1845 }
1846
3004 /* Disable transmission of beacons */ 1847 /* Disable transmission of beacons */
3005 if ((changed & BSS_CHANGED_BEACON_ENABLED) && !bss_conf->enable_beacon) 1848 if ((changed & BSS_CHANGED_BEACON_ENABLED) && !bss_conf->enable_beacon)
3006 ath9k_hw_stoptxdma(sc->sc_ah, sc->beacon.beaconq); 1849 ath9k_hw_stoptxdma(sc->sc_ah, sc->beacon.beaconq);
@@ -3133,6 +1976,7 @@ static void ath9k_sw_scan_start(struct ieee80211_hw *hw)
3133{ 1976{
3134 struct ath_wiphy *aphy = hw->priv; 1977 struct ath_wiphy *aphy = hw->priv;
3135 struct ath_softc *sc = aphy->sc; 1978 struct ath_softc *sc = aphy->sc;
1979 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
3136 1980
3137 mutex_lock(&sc->mutex); 1981 mutex_lock(&sc->mutex);
3138 if (ath9k_wiphy_scanning(sc)) { 1982 if (ath9k_wiphy_scanning(sc)) {
@@ -3148,10 +1992,9 @@ static void ath9k_sw_scan_start(struct ieee80211_hw *hw)
3148 1992
3149 aphy->state = ATH_WIPHY_SCAN; 1993 aphy->state = ATH_WIPHY_SCAN;
3150 ath9k_wiphy_pause_all_forced(sc, aphy); 1994 ath9k_wiphy_pause_all_forced(sc, aphy);
3151
3152 spin_lock_bh(&sc->ani_lock);
3153 sc->sc_flags |= SC_OP_SCANNING; 1995 sc->sc_flags |= SC_OP_SCANNING;
3154 spin_unlock_bh(&sc->ani_lock); 1996 del_timer_sync(&common->ani.timer);
1997 cancel_delayed_work_sync(&sc->tx_complete_work);
3155 mutex_unlock(&sc->mutex); 1998 mutex_unlock(&sc->mutex);
3156} 1999}
3157 2000
@@ -3159,17 +2002,30 @@ static void ath9k_sw_scan_complete(struct ieee80211_hw *hw)
3159{ 2002{
3160 struct ath_wiphy *aphy = hw->priv; 2003 struct ath_wiphy *aphy = hw->priv;
3161 struct ath_softc *sc = aphy->sc; 2004 struct ath_softc *sc = aphy->sc;
2005 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
3162 2006
3163 mutex_lock(&sc->mutex); 2007 mutex_lock(&sc->mutex);
3164 spin_lock_bh(&sc->ani_lock);
3165 aphy->state = ATH_WIPHY_ACTIVE; 2008 aphy->state = ATH_WIPHY_ACTIVE;
3166 sc->sc_flags &= ~SC_OP_SCANNING; 2009 sc->sc_flags &= ~SC_OP_SCANNING;
3167 sc->sc_flags |= SC_OP_FULL_RESET; 2010 sc->sc_flags |= SC_OP_FULL_RESET;
3168 spin_unlock_bh(&sc->ani_lock); 2011 ath_start_ani(common);
2012 ieee80211_queue_delayed_work(sc->hw, &sc->tx_complete_work, 0);
3169 ath_beacon_config(sc, NULL); 2013 ath_beacon_config(sc, NULL);
3170 mutex_unlock(&sc->mutex); 2014 mutex_unlock(&sc->mutex);
3171} 2015}
3172 2016
2017static void ath9k_set_coverage_class(struct ieee80211_hw *hw, u8 coverage_class)
2018{
2019 struct ath_wiphy *aphy = hw->priv;
2020 struct ath_softc *sc = aphy->sc;
2021 struct ath_hw *ah = sc->sc_ah;
2022
2023 mutex_lock(&sc->mutex);
2024 ah->coverage_class = coverage_class;
2025 ath9k_hw_init_global_settings(ah);
2026 mutex_unlock(&sc->mutex);
2027}
2028
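The new ath9k_set_coverage_class() callback above just records the value and lets ath9k_hw_init_global_settings() reprogram the timing registers. As background (not stated in the patch): the 802.11 coverage class is intended to add roughly 3 us of air-propagation allowance per step to the slot time and the ACK/CTS timeouts, so larger cells keep working at the cost of some throughput. A standalone sketch of that arithmetic; the exact register formulas used by ath9k_hw are not part of this diff:

#include <stdio.h>

/* Illustrative timing model: each coverage-class step stretches the
 * slot time by ~3 us; ACK/CTS timeouts grow by a similar amount. */
static unsigned int slot_time_us(unsigned int base_slot_us,
                                 unsigned int coverage_class)
{
        return base_slot_us + 3 * coverage_class;
}

int main(void)
{
        /* short slot (9 us) at coverage class 0 and at coverage class 4 */
        printf("%u %u\n", slot_time_us(9, 0), slot_time_us(9, 4));
        return 0;
}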
3173struct ieee80211_ops ath9k_ops = { 2029struct ieee80211_ops ath9k_ops = {
3174 .tx = ath9k_tx, 2030 .tx = ath9k_tx,
3175 .start = ath9k_start, 2031 .start = ath9k_start,
@@ -3189,64 +2045,5 @@ struct ieee80211_ops ath9k_ops = {
3189 .sw_scan_start = ath9k_sw_scan_start, 2045 .sw_scan_start = ath9k_sw_scan_start,
3190 .sw_scan_complete = ath9k_sw_scan_complete, 2046 .sw_scan_complete = ath9k_sw_scan_complete,
3191 .rfkill_poll = ath9k_rfkill_poll_state, 2047 .rfkill_poll = ath9k_rfkill_poll_state,
2048 .set_coverage_class = ath9k_set_coverage_class,
3192}; 2049};
3193
3194static int __init ath9k_init(void)
3195{
3196 int error;
3197
3198 /* Register rate control algorithm */
3199 error = ath_rate_control_register();
3200 if (error != 0) {
3201 printk(KERN_ERR
3202 "ath9k: Unable to register rate control "
3203 "algorithm: %d\n",
3204 error);
3205 goto err_out;
3206 }
3207
3208 error = ath9k_debug_create_root();
3209 if (error) {
3210 printk(KERN_ERR
3211 "ath9k: Unable to create debugfs root: %d\n",
3212 error);
3213 goto err_rate_unregister;
3214 }
3215
3216 error = ath_pci_init();
3217 if (error < 0) {
3218 printk(KERN_ERR
3219 "ath9k: No PCI devices found, driver not installed.\n");
3220 error = -ENODEV;
3221 goto err_remove_root;
3222 }
3223
3224 error = ath_ahb_init();
3225 if (error < 0) {
3226 error = -ENODEV;
3227 goto err_pci_exit;
3228 }
3229
3230 return 0;
3231
3232 err_pci_exit:
3233 ath_pci_exit();
3234
3235 err_remove_root:
3236 ath9k_debug_remove_root();
3237 err_rate_unregister:
3238 ath_rate_control_unregister();
3239 err_out:
3240 return error;
3241}
3242module_init(ath9k_init);
3243
3244static void __exit ath9k_exit(void)
3245{
3246 ath_ahb_exit();
3247 ath_pci_exit();
3248 ath9k_debug_remove_root();
3249 ath_rate_control_unregister();
3250 printk(KERN_INFO "%s: Driver unloaded\n", dev_info);
3251}
3252module_exit(ath9k_exit);
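The add_interface/remove_interface hunks above (and the beacon-slot comparison) track a mac80211 API change in this series: drivers are now handed the struct ieee80211_vif pointer directly instead of a wrapping struct ieee80211_if_init_conf, and the interface MAC address is read from vif->addr. A minimal sketch of the new callback shape for a hypothetical driver (the "foo" names are placeholders, not part of this patch):

static int foo_add_interface(struct ieee80211_hw *hw,
                             struct ieee80211_vif *vif)
{
        struct foo_priv *priv = hw->priv;            /* hypothetical driver state */

        if (vif->type != NL80211_IFTYPE_STATION)     /* was conf->type */
                return -EOPNOTSUPP;

        memcpy(priv->mac_addr, vif->addr, ETH_ALEN); /* was conf->mac_addr */
        priv->vif = vif;                             /* was conf->vif */
        return 0;
}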
diff --git a/drivers/net/wireless/ath/ath9k/pci.c b/drivers/net/wireless/ath/ath9k/pci.c
index f7af5ea5475..fe2c3a644a6 100644
--- a/drivers/net/wireless/ath/ath9k/pci.c
+++ b/drivers/net/wireless/ath/ath9k/pci.c
@@ -18,7 +18,7 @@
18#include <linux/pci.h> 18#include <linux/pci.h>
19#include "ath9k.h" 19#include "ath9k.h"
20 20
21static struct pci_device_id ath_pci_id_table[] __devinitdata = { 21static DEFINE_PCI_DEVICE_TABLE(ath_pci_id_table) = {
22 { PCI_VDEVICE(ATHEROS, 0x0023) }, /* PCI */ 22 { PCI_VDEVICE(ATHEROS, 0x0023) }, /* PCI */
23 { PCI_VDEVICE(ATHEROS, 0x0024) }, /* PCI-E */ 23 { PCI_VDEVICE(ATHEROS, 0x0024) }, /* PCI-E */
24 { PCI_VDEVICE(ATHEROS, 0x0027) }, /* PCI */ 24 { PCI_VDEVICE(ATHEROS, 0x0027) }, /* PCI */
@@ -113,25 +113,22 @@ static int ath_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
113 u16 subsysid; 113 u16 subsysid;
114 u32 val; 114 u32 val;
115 int ret = 0; 115 int ret = 0;
116 struct ath_hw *ah;
117 char hw_name[64]; 116 char hw_name[64];
118 117
119 if (pci_enable_device(pdev)) 118 if (pci_enable_device(pdev))
120 return -EIO; 119 return -EIO;
121 120
122 ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); 121 ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
123
124 if (ret) { 122 if (ret) {
125 printk(KERN_ERR "ath9k: 32-bit DMA not available\n"); 123 printk(KERN_ERR "ath9k: 32-bit DMA not available\n");
126 goto bad; 124 goto err_dma;
127 } 125 }
128 126
129 ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); 127 ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
130
131 if (ret) { 128 if (ret) {
132 printk(KERN_ERR "ath9k: 32-bit DMA consistent " 129 printk(KERN_ERR "ath9k: 32-bit DMA consistent "
133 "DMA enable failed\n"); 130 "DMA enable failed\n");
134 goto bad; 131 goto err_dma;
135 } 132 }
136 133
137 /* 134 /*
@@ -171,22 +168,22 @@ static int ath_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
171 if (ret) { 168 if (ret) {
172 dev_err(&pdev->dev, "PCI memory region reserve error\n"); 169 dev_err(&pdev->dev, "PCI memory region reserve error\n");
173 ret = -ENODEV; 170 ret = -ENODEV;
174 goto bad; 171 goto err_region;
175 } 172 }
176 173
177 mem = pci_iomap(pdev, 0, 0); 174 mem = pci_iomap(pdev, 0, 0);
178 if (!mem) { 175 if (!mem) {
179 printk(KERN_ERR "PCI memory map error\n") ; 176 printk(KERN_ERR "PCI memory map error\n") ;
180 ret = -EIO; 177 ret = -EIO;
181 goto bad1; 178 goto err_iomap;
182 } 179 }
183 180
184 hw = ieee80211_alloc_hw(sizeof(struct ath_wiphy) + 181 hw = ieee80211_alloc_hw(sizeof(struct ath_wiphy) +
185 sizeof(struct ath_softc), &ath9k_ops); 182 sizeof(struct ath_softc), &ath9k_ops);
186 if (!hw) { 183 if (!hw) {
187 dev_err(&pdev->dev, "no memory for ieee80211_hw\n"); 184 dev_err(&pdev->dev, "No memory for ieee80211_hw\n");
188 ret = -ENOMEM; 185 ret = -ENOMEM;
189 goto bad2; 186 goto err_alloc_hw;
190 } 187 }
191 188
192 SET_IEEE80211_DEV(hw, &pdev->dev); 189 SET_IEEE80211_DEV(hw, &pdev->dev);
@@ -201,25 +198,25 @@ static int ath_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
201 sc->dev = &pdev->dev; 198 sc->dev = &pdev->dev;
202 sc->mem = mem; 199 sc->mem = mem;
203 200
204 pci_read_config_word(pdev, PCI_SUBSYSTEM_ID, &subsysid); 201 /* Will be cleared in ath9k_start() */
205 ret = ath_init_device(id->device, sc, subsysid, &ath_pci_bus_ops); 202 sc->sc_flags |= SC_OP_INVALID;
206 if (ret) {
207 dev_err(&pdev->dev, "failed to initialize device\n");
208 goto bad3;
209 }
210
211 /* setup interrupt service routine */
212 203
213 ret = request_irq(pdev->irq, ath_isr, IRQF_SHARED, "ath9k", sc); 204 ret = request_irq(pdev->irq, ath_isr, IRQF_SHARED, "ath9k", sc);
214 if (ret) { 205 if (ret) {
215 dev_err(&pdev->dev, "request_irq failed\n"); 206 dev_err(&pdev->dev, "request_irq failed\n");
216 goto bad4; 207 goto err_irq;
217 } 208 }
218 209
219 sc->irq = pdev->irq; 210 sc->irq = pdev->irq;
220 211
221 ah = sc->sc_ah; 212 pci_read_config_word(pdev, PCI_SUBSYSTEM_ID, &subsysid);
222 ath9k_hw_name(ah, hw_name, sizeof(hw_name)); 213 ret = ath9k_init_device(id->device, sc, subsysid, &ath_pci_bus_ops);
214 if (ret) {
215 dev_err(&pdev->dev, "Failed to initialize device\n");
216 goto err_init;
217 }
218
219 ath9k_hw_name(sc->sc_ah, hw_name, sizeof(hw_name));
223 printk(KERN_INFO 220 printk(KERN_INFO
224 "%s: %s mem=0x%lx, irq=%d\n", 221 "%s: %s mem=0x%lx, irq=%d\n",
225 wiphy_name(hw->wiphy), 222 wiphy_name(hw->wiphy),
@@ -227,15 +224,18 @@ static int ath_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
227 (unsigned long)mem, pdev->irq); 224 (unsigned long)mem, pdev->irq);
228 225
229 return 0; 226 return 0;
230bad4: 227
231 ath_detach(sc); 228err_init:
232bad3: 229 free_irq(sc->irq, sc);
230err_irq:
233 ieee80211_free_hw(hw); 231 ieee80211_free_hw(hw);
234bad2: 232err_alloc_hw:
235 pci_iounmap(pdev, mem); 233 pci_iounmap(pdev, mem);
236bad1: 234err_iomap:
237 pci_release_region(pdev, 0); 235 pci_release_region(pdev, 0);
238bad: 236err_region:
237 /* Nothing */
238err_dma:
239 pci_disable_device(pdev); 239 pci_disable_device(pdev);
240 return ret; 240 return ret;
241} 241}
@@ -245,8 +245,12 @@ static void ath_pci_remove(struct pci_dev *pdev)
245 struct ieee80211_hw *hw = pci_get_drvdata(pdev); 245 struct ieee80211_hw *hw = pci_get_drvdata(pdev);
246 struct ath_wiphy *aphy = hw->priv; 246 struct ath_wiphy *aphy = hw->priv;
247 struct ath_softc *sc = aphy->sc; 247 struct ath_softc *sc = aphy->sc;
248 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
248 249
249 ath_cleanup(sc); 250 ath9k_deinit_device(sc);
251 free_irq(sc->irq, sc);
252 ieee80211_free_hw(sc->hw);
253 ath_bus_cleanup(common);
250} 254}
251 255
252#ifdef CONFIG_PM 256#ifdef CONFIG_PM
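Besides deferring ath9k_init_device() until after the IRQ is set up, the probe rework above renames the unwind labels from bad/bad1/... to err_* names that say which step failed. A condensed sketch of the resulting goto-unwind pattern with hypothetical resources, for readers who have not met the kernel convention before:

static int foo_probe(struct pci_dev *pdev)
{
        int ret;

        ret = pci_enable_device(pdev);
        if (ret)
                return ret;

        ret = pci_request_region(pdev, 0, "foo");
        if (ret)
                goto err_region;

        ret = request_irq(pdev->irq, foo_isr, IRQF_SHARED, "foo", pdev);
        if (ret)
                goto err_irq;

        return 0;

err_irq:                                /* undo only what succeeded */
        pci_release_region(pdev, 0);
err_region:
        pci_disable_device(pdev);
        return ret;
}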
diff --git a/drivers/net/wireless/ath/ath9k/rc.h b/drivers/net/wireless/ath/ath9k/rc.h
index 9eb96f50699..4f6d6fd442f 100644
--- a/drivers/net/wireless/ath/ath9k/rc.h
+++ b/drivers/net/wireless/ath/ath9k/rc.h
@@ -57,6 +57,10 @@ enum {
57 || (_phy == WLAN_RC_PHY_HT_40_DS) \ 57 || (_phy == WLAN_RC_PHY_HT_40_DS) \
58 || (_phy == WLAN_RC_PHY_HT_20_DS_HGI) \ 58 || (_phy == WLAN_RC_PHY_HT_20_DS_HGI) \
59 || (_phy == WLAN_RC_PHY_HT_40_DS_HGI)) 59 || (_phy == WLAN_RC_PHY_HT_40_DS_HGI))
60#define WLAN_RC_PHY_20(_phy) ((_phy == WLAN_RC_PHY_HT_20_SS) \
61 || (_phy == WLAN_RC_PHY_HT_20_DS) \
62 || (_phy == WLAN_RC_PHY_HT_20_SS_HGI) \
63 || (_phy == WLAN_RC_PHY_HT_20_DS_HGI))
60#define WLAN_RC_PHY_40(_phy) ((_phy == WLAN_RC_PHY_HT_40_SS) \ 64#define WLAN_RC_PHY_40(_phy) ((_phy == WLAN_RC_PHY_HT_40_SS) \
61 || (_phy == WLAN_RC_PHY_HT_40_DS) \ 65 || (_phy == WLAN_RC_PHY_HT_40_DS) \
62 || (_phy == WLAN_RC_PHY_HT_40_SS_HGI) \ 66 || (_phy == WLAN_RC_PHY_HT_40_SS_HGI) \
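The new WLAN_RC_PHY_20() predicate added above mirrors the existing WLAN_RC_PHY_40() test for the 20 MHz HT PHY types. A purely illustrative caller (names are hypothetical, not from the ath9k rate-control code):

if (WLAN_RC_PHY_20(rate_phy))
        column = RC_COLUMN_HT20;        /* hypothetical 20 MHz rate set */
else if (WLAN_RC_PHY_40(rate_phy))
        column = RC_COLUMN_HT40;        /* hypothetical 40 MHz rate set */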
diff --git a/drivers/net/wireless/ath/ath9k/recv.c b/drivers/net/wireless/ath/ath9k/recv.c
index 477365e5ae6..40b5d05edcc 100644
--- a/drivers/net/wireless/ath/ath9k/recv.c
+++ b/drivers/net/wireless/ath/ath9k/recv.c
@@ -364,10 +364,10 @@ static void ath_rx_ps_beacon(struct ath_softc *sc, struct sk_buff *skb)
364 if (memcmp(common->curbssid, mgmt->bssid, ETH_ALEN) != 0) 364 if (memcmp(common->curbssid, mgmt->bssid, ETH_ALEN) != 0)
365 return; /* not from our current AP */ 365 return; /* not from our current AP */
366 366
367 sc->sc_flags &= ~SC_OP_WAIT_FOR_BEACON; 367 sc->ps_flags &= ~PS_WAIT_FOR_BEACON;
368 368
369 if (sc->sc_flags & SC_OP_BEACON_SYNC) { 369 if (sc->ps_flags & PS_BEACON_SYNC) {
370 sc->sc_flags &= ~SC_OP_BEACON_SYNC; 370 sc->ps_flags &= ~PS_BEACON_SYNC;
371 ath_print(common, ATH_DBG_PS, 371 ath_print(common, ATH_DBG_PS,
372 "Reconfigure Beacon timers based on " 372 "Reconfigure Beacon timers based on "
373 "timestamp from the AP\n"); 373 "timestamp from the AP\n");
@@ -384,17 +384,17 @@ static void ath_rx_ps_beacon(struct ath_softc *sc, struct sk_buff *skb)
384 */ 384 */
385 ath_print(common, ATH_DBG_PS, "Received DTIM beacon indicating " 385 ath_print(common, ATH_DBG_PS, "Received DTIM beacon indicating "
386 "buffered broadcast/multicast frame(s)\n"); 386 "buffered broadcast/multicast frame(s)\n");
387 sc->sc_flags |= SC_OP_WAIT_FOR_CAB | SC_OP_WAIT_FOR_BEACON; 387 sc->ps_flags |= PS_WAIT_FOR_CAB | PS_WAIT_FOR_BEACON;
388 return; 388 return;
389 } 389 }
390 390
391 if (sc->sc_flags & SC_OP_WAIT_FOR_CAB) { 391 if (sc->ps_flags & PS_WAIT_FOR_CAB) {
392 /* 392 /*
393 * This can happen if a broadcast frame is dropped or the AP 393 * This can happen if a broadcast frame is dropped or the AP
394 * fails to send a frame indicating that all CAB frames have 394 * fails to send a frame indicating that all CAB frames have
395 * been delivered. 395 * been delivered.
396 */ 396 */
397 sc->sc_flags &= ~SC_OP_WAIT_FOR_CAB; 397 sc->ps_flags &= ~PS_WAIT_FOR_CAB;
398 ath_print(common, ATH_DBG_PS, 398 ath_print(common, ATH_DBG_PS,
399 "PS wait for CAB frames timed out\n"); 399 "PS wait for CAB frames timed out\n");
400 } 400 }
@@ -408,10 +408,10 @@ static void ath_rx_ps(struct ath_softc *sc, struct sk_buff *skb)
408 hdr = (struct ieee80211_hdr *)skb->data; 408 hdr = (struct ieee80211_hdr *)skb->data;
409 409
410 /* Process Beacon and CAB receive in PS state */ 410 /* Process Beacon and CAB receive in PS state */
411 if ((sc->sc_flags & SC_OP_WAIT_FOR_BEACON) && 411 if ((sc->ps_flags & PS_WAIT_FOR_BEACON) &&
412 ieee80211_is_beacon(hdr->frame_control)) 412 ieee80211_is_beacon(hdr->frame_control))
413 ath_rx_ps_beacon(sc, skb); 413 ath_rx_ps_beacon(sc, skb);
414 else if ((sc->sc_flags & SC_OP_WAIT_FOR_CAB) && 414 else if ((sc->ps_flags & PS_WAIT_FOR_CAB) &&
415 (ieee80211_is_data(hdr->frame_control) || 415 (ieee80211_is_data(hdr->frame_control) ||
416 ieee80211_is_action(hdr->frame_control)) && 416 ieee80211_is_action(hdr->frame_control)) &&
417 is_multicast_ether_addr(hdr->addr1) && 417 is_multicast_ether_addr(hdr->addr1) &&
@@ -420,20 +420,20 @@ static void ath_rx_ps(struct ath_softc *sc, struct sk_buff *skb)
420 * No more broadcast/multicast frames to be received at this 420 * No more broadcast/multicast frames to be received at this
421 * point. 421 * point.
422 */ 422 */
423 sc->sc_flags &= ~SC_OP_WAIT_FOR_CAB; 423 sc->ps_flags &= ~PS_WAIT_FOR_CAB;
424 ath_print(common, ATH_DBG_PS, 424 ath_print(common, ATH_DBG_PS,
425 "All PS CAB frames received, back to sleep\n"); 425 "All PS CAB frames received, back to sleep\n");
426 } else if ((sc->sc_flags & SC_OP_WAIT_FOR_PSPOLL_DATA) && 426 } else if ((sc->ps_flags & PS_WAIT_FOR_PSPOLL_DATA) &&
427 !is_multicast_ether_addr(hdr->addr1) && 427 !is_multicast_ether_addr(hdr->addr1) &&
428 !ieee80211_has_morefrags(hdr->frame_control)) { 428 !ieee80211_has_morefrags(hdr->frame_control)) {
429 sc->sc_flags &= ~SC_OP_WAIT_FOR_PSPOLL_DATA; 429 sc->ps_flags &= ~PS_WAIT_FOR_PSPOLL_DATA;
430 ath_print(common, ATH_DBG_PS, 430 ath_print(common, ATH_DBG_PS,
431 "Going back to sleep after having received " 431 "Going back to sleep after having received "
432 "PS-Poll data (0x%x)\n", 432 "PS-Poll data (0x%x)\n",
433 sc->sc_flags & (SC_OP_WAIT_FOR_BEACON | 433 sc->ps_flags & (PS_WAIT_FOR_BEACON |
434 SC_OP_WAIT_FOR_CAB | 434 PS_WAIT_FOR_CAB |
435 SC_OP_WAIT_FOR_PSPOLL_DATA | 435 PS_WAIT_FOR_PSPOLL_DATA |
436 SC_OP_WAIT_FOR_TX_ACK)); 436 PS_WAIT_FOR_TX_ACK));
437 } 437 }
438} 438}
439 439
@@ -571,6 +571,8 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush)
571 hw = ath_get_virt_hw(sc, hdr); 571 hw = ath_get_virt_hw(sc, hdr);
572 rx_stats = &ds->ds_rxstat; 572 rx_stats = &ds->ds_rxstat;
573 573
574 ath_debug_stat_rx(sc, bf);
575
574 /* 576 /*
575 * If we're asked to flush receive queue, directly 577 * If we're asked to flush receive queue, directly
576 * chain it back at the queue without processing it. 578 * chain it back at the queue without processing it.
@@ -631,9 +633,9 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush)
631 sc->rx.rxotherant = 0; 633 sc->rx.rxotherant = 0;
632 } 634 }
633 635
634 if (unlikely(sc->sc_flags & (SC_OP_WAIT_FOR_BEACON | 636 if (unlikely(sc->ps_flags & (PS_WAIT_FOR_BEACON |
635 SC_OP_WAIT_FOR_CAB | 637 PS_WAIT_FOR_CAB |
636 SC_OP_WAIT_FOR_PSPOLL_DATA))) 638 PS_WAIT_FOR_PSPOLL_DATA)))
637 ath_rx_ps(sc, skb); 639 ath_rx_ps(sc, skb);
638 640
639 ath_rx_send_to_mac80211(hw, sc, skb, rxs); 641 ath_rx_send_to_mac80211(hw, sc, skb, rxs);
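The recv.c hunks above, like the main.c hunks earlier and the xmit.c hunks further down, belong to one change: the power-save bookkeeping moves out of the crowded sc->sc_flags word into a dedicated sc->ps_flags field with PS_* names. A sketch of what the new flag set looks like; the real definitions live in ath9k.h, which is not part of the quoted diff, so the bit positions here are illustrative:

/* Illustrative layout of the dedicated power-save flag word. */
#define PS_WAIT_FOR_BEACON       (1 << 0)
#define PS_WAIT_FOR_CAB          (1 << 1)
#define PS_WAIT_FOR_PSPOLL_DATA  (1 << 2)
#define PS_WAIT_FOR_TX_ACK       (1 << 3)
#define PS_BEACON_SYNC           (1 << 4)
#define PS_NULLFUNC_COMPLETED    (1 << 5)
#define PS_ENABLED               (1 << 6)

/* Typical use, mirroring ath_rx_tasklet() above: run the PS state
 * machine only while at least one "wait" bit is set. */
if (sc->ps_flags & (PS_WAIT_FOR_BEACON | PS_WAIT_FOR_CAB |
                    PS_WAIT_FOR_PSPOLL_DATA))
        ath_rx_ps(sc, skb);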
diff --git a/drivers/net/wireless/ath/ath9k/virtual.c b/drivers/net/wireless/ath/ath9k/virtual.c
index cd26caaf44e..a43fbf84dab 100644
--- a/drivers/net/wireless/ath/ath9k/virtual.c
+++ b/drivers/net/wireless/ath/ath9k/virtual.c
@@ -152,7 +152,7 @@ int ath9k_wiphy_add(struct ath_softc *sc)
152 152
153 SET_IEEE80211_PERM_ADDR(hw, addr); 153 SET_IEEE80211_PERM_ADDR(hw, addr);
154 154
155 ath_set_hw_capab(sc, hw); 155 ath9k_set_hw_capab(sc, hw);
156 156
157 error = ieee80211_register_hw(hw); 157 error = ieee80211_register_hw(hw);
158 158
diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c
index fa12b9060b0..a821bb687b3 100644
--- a/drivers/net/wireless/ath/ath9k/xmit.c
+++ b/drivers/net/wireless/ath/ath9k/xmit.c
@@ -1648,7 +1648,7 @@ static int ath_tx_setup_buffer(struct ieee80211_hw *hw, struct ath_buf *bf,
1648 /* tag if this is a nullfunc frame to enable PS when AP acks it */ 1648 /* tag if this is a nullfunc frame to enable PS when AP acks it */
1649 if (ieee80211_is_nullfunc(fc) && ieee80211_has_pm(fc)) { 1649 if (ieee80211_is_nullfunc(fc) && ieee80211_has_pm(fc)) {
1650 bf->bf_isnullfunc = true; 1650 bf->bf_isnullfunc = true;
1651 sc->sc_flags &= ~SC_OP_NULLFUNC_COMPLETED; 1651 sc->ps_flags &= ~PS_NULLFUNC_COMPLETED;
1652 } else 1652 } else
1653 bf->bf_isnullfunc = false; 1653 bf->bf_isnullfunc = false;
1654 1654
@@ -1858,15 +1858,15 @@ static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
1858 skb_pull(skb, padsize); 1858 skb_pull(skb, padsize);
1859 } 1859 }
1860 1860
1861 if (sc->sc_flags & SC_OP_WAIT_FOR_TX_ACK) { 1861 if (sc->ps_flags & PS_WAIT_FOR_TX_ACK) {
1862 sc->sc_flags &= ~SC_OP_WAIT_FOR_TX_ACK; 1862 sc->ps_flags &= ~PS_WAIT_FOR_TX_ACK;
1863 ath_print(common, ATH_DBG_PS, 1863 ath_print(common, ATH_DBG_PS,
1864 "Going back to sleep after having " 1864 "Going back to sleep after having "
1865 "received TX status (0x%x)\n", 1865 "received TX status (0x%x)\n",
1866 sc->sc_flags & (SC_OP_WAIT_FOR_BEACON | 1866 sc->ps_flags & (PS_WAIT_FOR_BEACON |
1867 SC_OP_WAIT_FOR_CAB | 1867 PS_WAIT_FOR_CAB |
1868 SC_OP_WAIT_FOR_PSPOLL_DATA | 1868 PS_WAIT_FOR_PSPOLL_DATA |
1869 SC_OP_WAIT_FOR_TX_ACK)); 1869 PS_WAIT_FOR_TX_ACK));
1870 } 1870 }
1871 1871
1872 if (unlikely(tx_info->pad[0] & ATH_TX_INFO_FRAME_TYPE_INTERNAL)) 1872 if (unlikely(tx_info->pad[0] & ATH_TX_INFO_FRAME_TYPE_INTERNAL))
@@ -2053,11 +2053,11 @@ static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
2053 */ 2053 */
2054 if (bf->bf_isnullfunc && 2054 if (bf->bf_isnullfunc &&
2055 (ds->ds_txstat.ts_status & ATH9K_TX_ACKED)) { 2055 (ds->ds_txstat.ts_status & ATH9K_TX_ACKED)) {
2056 if ((sc->sc_flags & SC_OP_PS_ENABLED)) { 2056 if ((sc->ps_flags & PS_ENABLED)) {
2057 sc->ps_enabled = true; 2057 sc->ps_enabled = true;
2058 ath9k_hw_setrxabort(sc->sc_ah, 1); 2058 ath9k_hw_setrxabort(sc->sc_ah, 1);
2059 } else 2059 } else
2060 sc->sc_flags |= SC_OP_NULLFUNC_COMPLETED; 2060 sc->ps_flags |= PS_NULLFUNC_COMPLETED;
2061 } 2061 }
2062 2062
2063 /* 2063 /*
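The xmit.c hunks complete the same ps_flags migration for the transmit path. The underlying 802.11 handshake: a station announces sleep with a nullfunc frame carrying the PM bit, but it must stay awake until the AP's ACK for that frame has been seen. Condensed from the two hunks above (a paraphrase, not new driver logic):

/* Queueing side: remember that this frame, once ACKed, permits sleep. */
if (ieee80211_is_nullfunc(fc) && ieee80211_has_pm(fc)) {
        bf->bf_isnullfunc = true;                /* checked at TX completion */
        sc->ps_flags &= ~PS_NULLFUNC_COMPLETED;
}

/* Completion side: doze only after the ACK has been observed. */
if (bf->bf_isnullfunc && (ts_status & ATH9K_TX_ACKED)) {
        if (sc->ps_flags & PS_ENABLED)
                ath9k_hw_setrxabort(sc->sc_ah, 1);     /* sleep now */
        else
                sc->ps_flags |= PS_NULLFUNC_COMPLETED; /* sleep later */
}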
diff --git a/drivers/net/wireless/atmel_pci.c b/drivers/net/wireless/atmel_pci.c
index 92f87fbe750..9ab1192004c 100644
--- a/drivers/net/wireless/atmel_pci.c
+++ b/drivers/net/wireless/atmel_pci.c
@@ -31,7 +31,7 @@ MODULE_DESCRIPTION("Support for Atmel at76c50x 802.11 wireless ethernet cards.")
31MODULE_LICENSE("GPL"); 31MODULE_LICENSE("GPL");
32MODULE_SUPPORTED_DEVICE("Atmel at76c506 PCI wireless cards"); 32MODULE_SUPPORTED_DEVICE("Atmel at76c506 PCI wireless cards");
33 33
34static struct pci_device_id card_ids[] = { 34static DEFINE_PCI_DEVICE_TABLE(card_ids) = {
35 { 0x1114, 0x0506, PCI_ANY_ID, PCI_ANY_ID }, 35 { 0x1114, 0x0506, PCI_ANY_ID, PCI_ANY_ID },
36 { 0, } 36 { 0, }
37}; 37};
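Both the ath9k and Atmel ID tables above switch to DEFINE_PCI_DEVICE_TABLE(). In kernels of this vintage the macro is, approximately, a const declaration annotated __devinitconst so the table lands in the init-discardable section; the expansion shown here is from memory and may differ in detail from include/linux/pci.h in this tree:

/* Approximate expansion of the helper: */
#define DEFINE_PCI_DEVICE_TABLE(_table) \
        const struct pci_device_id _table[] __devinitconst

/* so the Atmel hunk above is roughly equivalent to writing: */
static const struct pci_device_id card_ids[] __devinitconst = {
        { 0x1114, 0x0506, PCI_ANY_ID, PCI_ANY_ID },
        { 0, }
};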
diff --git a/drivers/net/wireless/b43/Kconfig b/drivers/net/wireless/b43/Kconfig
index 64c12e1bced..073be566d05 100644
--- a/drivers/net/wireless/b43/Kconfig
+++ b/drivers/net/wireless/b43/Kconfig
@@ -3,6 +3,7 @@ config B43
3 depends on SSB_POSSIBLE && MAC80211 && HAS_DMA 3 depends on SSB_POSSIBLE && MAC80211 && HAS_DMA
4 select SSB 4 select SSB
5 select FW_LOADER 5 select FW_LOADER
6 select SSB_BLOCKIO
6 ---help--- 7 ---help---
7 b43 is a driver for the Broadcom 43xx series wireless devices. 8 b43 is a driver for the Broadcom 43xx series wireless devices.
8 9
@@ -78,14 +79,6 @@ config B43_SDIO
78 79
79 If unsure, say N. 80 If unsure, say N.
80 81
81# Data transfers to the device via PIO
82# This is only needed on PCMCIA and SDIO devices. All others can do DMA properly.
83config B43_PIO
84 bool
85 depends on B43 && (B43_SDIO || B43_PCMCIA || B43_FORCE_PIO)
86 select SSB_BLOCKIO
87 default y
88
89config B43_NPHY 82config B43_NPHY
90 bool "Pre IEEE 802.11n support (BROKEN)" 83 bool "Pre IEEE 802.11n support (BROKEN)"
91 depends on B43 && EXPERIMENTAL && BROKEN 84 depends on B43 && EXPERIMENTAL && BROKEN
@@ -137,12 +130,4 @@ config B43_DEBUG
137 for production use. 130 for production use.
138 Only say Y, if you are debugging a problem in the b43 driver sourcecode. 131 Only say Y, if you are debugging a problem in the b43 driver sourcecode.
139 132
140config B43_FORCE_PIO
141 bool "Force usage of PIO instead of DMA"
142 depends on B43 && B43_DEBUG
143 ---help---
144 This will disable DMA and always enable PIO instead.
145 133
146 Say N!
147 This is only for debugging the PIO engine code. You do
148 _NOT_ want to enable this.
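The Kconfig changes above drop the compile-time PIO switches: pio.o is now always built (see the Makefile hunk right below) and the DMA-versus-PIO decision moves to runtime via a new "pio" module parameter added in the main.c hunks further down. A condensed sketch of how that decision ends up being made; the bus-type checks are abbreviated here:

static int modparam_pio;                        /* the new "pio" parameter */
module_param_named(pio, modparam_pio, int, 0444);

/* In b43_wireless_core_init(): PCMCIA and SDIO hosts cannot DMA,
 * everything else uses PIO only when explicitly requested. */
if (bus_is_pcmcia || bus_is_sdio || modparam_pio)
        err = b43_pio_init(dev);       /* dev->__using_pio_transfers = 1 */
else
        err = b43_dma_init(dev);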
diff --git a/drivers/net/wireless/b43/Makefile b/drivers/net/wireless/b43/Makefile
index 84772a2542d..5e83b6f0a3a 100644
--- a/drivers/net/wireless/b43/Makefile
+++ b/drivers/net/wireless/b43/Makefile
@@ -12,7 +12,7 @@ b43-y += xmit.o
12b43-y += lo.o 12b43-y += lo.o
13b43-y += wa.o 13b43-y += wa.o
14b43-y += dma.o 14b43-y += dma.o
15b43-$(CONFIG_B43_PIO) += pio.o 15b43-y += pio.o
16b43-y += rfkill.o 16b43-y += rfkill.o
17b43-$(CONFIG_B43_LEDS) += leds.o 17b43-$(CONFIG_B43_LEDS) += leds.o
18b43-$(CONFIG_B43_PCMCIA) += pcmcia.o 18b43-$(CONFIG_B43_PCMCIA) += pcmcia.o
diff --git a/drivers/net/wireless/b43/b43.h b/drivers/net/wireless/b43/b43.h
index fe3bf949199..54d6085a887 100644
--- a/drivers/net/wireless/b43/b43.h
+++ b/drivers/net/wireless/b43/b43.h
@@ -253,6 +253,14 @@ enum {
253#define B43_SHM_SH_MAXBFRAMES 0x0080 /* Maximum number of frames in a burst */ 253#define B43_SHM_SH_MAXBFRAMES 0x0080 /* Maximum number of frames in a burst */
254#define B43_SHM_SH_SPUWKUP 0x0094 /* pre-wakeup for synth PU in us */ 254#define B43_SHM_SH_SPUWKUP 0x0094 /* pre-wakeup for synth PU in us */
255#define B43_SHM_SH_PRETBTT 0x0096 /* pre-TBTT in us */ 255#define B43_SHM_SH_PRETBTT 0x0096 /* pre-TBTT in us */
256/* SHM_SHARED tx iq workarounds */
257#define B43_SHM_SH_NPHY_TXIQW0 0x0700
258#define B43_SHM_SH_NPHY_TXIQW1 0x0702
259#define B43_SHM_SH_NPHY_TXIQW2 0x0704
260#define B43_SHM_SH_NPHY_TXIQW3 0x0706
261/* SHM_SHARED tx pwr ctrl */
262#define B43_SHM_SH_NPHY_TXPWR_INDX0 0x0708
263#define B43_SHM_SH_NPHY_TXPWR_INDX1 0x070E
256 264
257/* SHM_SCRATCH offsets */ 265/* SHM_SCRATCH offsets */
258#define B43_SHM_SC_MINCONT 0x0003 /* Minimum contention window */ 266#define B43_SHM_SC_MINCONT 0x0003 /* Minimum contention window */
@@ -821,11 +829,9 @@ struct b43_wl {
821 /* The device LEDs. */ 829 /* The device LEDs. */
822 struct b43_leds leds; 830 struct b43_leds leds;
823 831
824#ifdef CONFIG_B43_PIO
825 /* Kmalloc'ed scratch space for PIO TX/RX. Protected by wl->mutex. */ 832 /* Kmalloc'ed scratch space for PIO TX/RX. Protected by wl->mutex. */
826 u8 pio_scratchspace[110] __attribute__((__aligned__(8))); 833 u8 pio_scratchspace[110] __attribute__((__aligned__(8)));
827 u8 pio_tailspace[4] __attribute__((__aligned__(8))); 834 u8 pio_tailspace[4] __attribute__((__aligned__(8)));
828#endif /* CONFIG_B43_PIO */
829}; 835};
830 836
831static inline struct b43_wl *hw_to_b43_wl(struct ieee80211_hw *hw) 837static inline struct b43_wl *hw_to_b43_wl(struct ieee80211_hw *hw)
@@ -876,20 +882,9 @@ static inline void b43_write32(struct b43_wldev *dev, u16 offset, u32 value)
876 882
877static inline bool b43_using_pio_transfers(struct b43_wldev *dev) 883static inline bool b43_using_pio_transfers(struct b43_wldev *dev)
878{ 884{
879#ifdef CONFIG_B43_PIO
880 return dev->__using_pio_transfers; 885 return dev->__using_pio_transfers;
881#else
882 return 0;
883#endif
884} 886}
885 887
886#ifdef CONFIG_B43_FORCE_PIO
887# define B43_FORCE_PIO 1
888#else
889# define B43_FORCE_PIO 0
890#endif
891
892
893/* Message printing */ 888/* Message printing */
894void b43info(struct b43_wl *wl, const char *fmt, ...) 889void b43info(struct b43_wl *wl, const char *fmt, ...)
895 __attribute__ ((format(printf, 2, 3))); 890 __attribute__ ((format(printf, 2, 3)));
diff --git a/drivers/net/wireless/b43/dma.c b/drivers/net/wireless/b43/dma.c
index 88d1fd02d40..615af22c49f 100644
--- a/drivers/net/wireless/b43/dma.c
+++ b/drivers/net/wireless/b43/dma.c
@@ -1653,7 +1653,6 @@ void b43_dma_tx_resume(struct b43_wldev *dev)
1653 b43_power_saving_ctl_bits(dev, 0); 1653 b43_power_saving_ctl_bits(dev, 0);
1654} 1654}
1655 1655
1656#ifdef CONFIG_B43_PIO
1657static void direct_fifo_rx(struct b43_wldev *dev, enum b43_dmatype type, 1656static void direct_fifo_rx(struct b43_wldev *dev, enum b43_dmatype type,
1658 u16 mmio_base, bool enable) 1657 u16 mmio_base, bool enable)
1659{ 1658{
@@ -1687,4 +1686,3 @@ void b43_dma_direct_fifo_rx(struct b43_wldev *dev,
1687 mmio_base = b43_dmacontroller_base(type, engine_index); 1686 mmio_base = b43_dmacontroller_base(type, engine_index);
1688 direct_fifo_rx(dev, type, mmio_base, enable); 1687 direct_fifo_rx(dev, type, mmio_base, enable);
1689} 1688}
1690#endif /* CONFIG_B43_PIO */
diff --git a/drivers/net/wireless/b43/main.c b/drivers/net/wireless/b43/main.c
index 4c41cfe44f2..9c5c7c9ad53 100644
--- a/drivers/net/wireless/b43/main.c
+++ b/drivers/net/wireless/b43/main.c
@@ -67,7 +67,12 @@ MODULE_AUTHOR("Gábor Stefanik");
67MODULE_LICENSE("GPL"); 67MODULE_LICENSE("GPL");
68 68
69MODULE_FIRMWARE(B43_SUPPORTED_FIRMWARE_ID); 69MODULE_FIRMWARE(B43_SUPPORTED_FIRMWARE_ID);
70 70MODULE_FIRMWARE("b43/ucode11.fw");
71MODULE_FIRMWARE("b43/ucode13.fw");
72MODULE_FIRMWARE("b43/ucode14.fw");
73MODULE_FIRMWARE("b43/ucode15.fw");
74MODULE_FIRMWARE("b43/ucode5.fw");
75MODULE_FIRMWARE("b43/ucode9.fw");
71 76
72static int modparam_bad_frames_preempt; 77static int modparam_bad_frames_preempt;
73module_param_named(bad_frames_preempt, modparam_bad_frames_preempt, int, 0444); 78module_param_named(bad_frames_preempt, modparam_bad_frames_preempt, int, 0444);
@@ -102,6 +107,9 @@ int b43_modparam_verbose = B43_VERBOSITY_DEFAULT;
102module_param_named(verbose, b43_modparam_verbose, int, 0644); 107module_param_named(verbose, b43_modparam_verbose, int, 0644);
103MODULE_PARM_DESC(verbose, "Log message verbosity: 0=error, 1=warn, 2=info(default), 3=debug"); 108MODULE_PARM_DESC(verbose, "Log message verbosity: 0=error, 1=warn, 2=info(default), 3=debug");
104 109
110static int modparam_pio;
111module_param_named(pio, modparam_pio, int, 0444);
112MODULE_PARM_DESC(pio, "enable(1) / disable(0) PIO mode");
105 113
106static const struct ssb_device_id b43_ssb_tbl[] = { 114static const struct ssb_device_id b43_ssb_tbl[] = {
107 SSB_DEVICE(SSB_VENDOR_BROADCOM, SSB_DEV_80211, 5), 115 SSB_DEVICE(SSB_VENDOR_BROADCOM, SSB_DEV_80211, 5),
@@ -110,6 +118,7 @@ static const struct ssb_device_id b43_ssb_tbl[] = {
110 SSB_DEVICE(SSB_VENDOR_BROADCOM, SSB_DEV_80211, 9), 118 SSB_DEVICE(SSB_VENDOR_BROADCOM, SSB_DEV_80211, 9),
111 SSB_DEVICE(SSB_VENDOR_BROADCOM, SSB_DEV_80211, 10), 119 SSB_DEVICE(SSB_VENDOR_BROADCOM, SSB_DEV_80211, 10),
112 SSB_DEVICE(SSB_VENDOR_BROADCOM, SSB_DEV_80211, 11), 120 SSB_DEVICE(SSB_VENDOR_BROADCOM, SSB_DEV_80211, 11),
121 SSB_DEVICE(SSB_VENDOR_BROADCOM, SSB_DEV_80211, 12),
113 SSB_DEVICE(SSB_VENDOR_BROADCOM, SSB_DEV_80211, 13), 122 SSB_DEVICE(SSB_VENDOR_BROADCOM, SSB_DEV_80211, 13),
114 SSB_DEVICE(SSB_VENDOR_BROADCOM, SSB_DEV_80211, 15), 123 SSB_DEVICE(SSB_VENDOR_BROADCOM, SSB_DEV_80211, 15),
115 SSB_DEVICE(SSB_VENDOR_BROADCOM, SSB_DEV_80211, 16), 124 SSB_DEVICE(SSB_VENDOR_BROADCOM, SSB_DEV_80211, 16),
@@ -1786,8 +1795,8 @@ static void b43_do_interrupt_thread(struct b43_wldev *dev)
1786 dma_reason[4], dma_reason[5]); 1795 dma_reason[4], dma_reason[5]);
1787 b43err(dev->wl, "This device does not support DMA " 1796 b43err(dev->wl, "This device does not support DMA "
1788 "on your system. Please use PIO instead.\n"); 1797 "on your system. Please use PIO instead.\n");
1789 b43err(dev->wl, "CONFIG_B43_FORCE_PIO must be set in " 1798 b43err(dev->wl, "Unload the b43 module and reload "
1790 "your kernel configuration.\n"); 1799 "with 'pio=1'\n");
1791 return; 1800 return;
1792 } 1801 }
1793 if (merged_dma_reason & B43_DMAIRQ_NONFATALMASK) { 1802 if (merged_dma_reason & B43_DMAIRQ_NONFATALMASK) {
@@ -4353,7 +4362,7 @@ static int b43_wireless_core_init(struct b43_wldev *dev)
4353 4362
4354 if ((dev->dev->bus->bustype == SSB_BUSTYPE_PCMCIA) || 4363 if ((dev->dev->bus->bustype == SSB_BUSTYPE_PCMCIA) ||
4355 (dev->dev->bus->bustype == SSB_BUSTYPE_SDIO) || 4364 (dev->dev->bus->bustype == SSB_BUSTYPE_SDIO) ||
4356 B43_FORCE_PIO) { 4365 modparam_pio) {
4357 dev->__using_pio_transfers = 1; 4366 dev->__using_pio_transfers = 1;
4358 err = b43_pio_init(dev); 4367 err = b43_pio_init(dev);
4359 } else { 4368 } else {
@@ -4388,7 +4397,7 @@ err_busdown:
4388} 4397}
4389 4398
4390static int b43_op_add_interface(struct ieee80211_hw *hw, 4399static int b43_op_add_interface(struct ieee80211_hw *hw,
4391 struct ieee80211_if_init_conf *conf) 4400 struct ieee80211_vif *vif)
4392{ 4401{
4393 struct b43_wl *wl = hw_to_b43_wl(hw); 4402 struct b43_wl *wl = hw_to_b43_wl(hw);
4394 struct b43_wldev *dev; 4403 struct b43_wldev *dev;
@@ -4396,24 +4405,24 @@ static int b43_op_add_interface(struct ieee80211_hw *hw,
4396 4405
4397 /* TODO: allow WDS/AP devices to coexist */ 4406 /* TODO: allow WDS/AP devices to coexist */
4398 4407
4399 if (conf->type != NL80211_IFTYPE_AP && 4408 if (vif->type != NL80211_IFTYPE_AP &&
4400 conf->type != NL80211_IFTYPE_MESH_POINT && 4409 vif->type != NL80211_IFTYPE_MESH_POINT &&
4401 conf->type != NL80211_IFTYPE_STATION && 4410 vif->type != NL80211_IFTYPE_STATION &&
4402 conf->type != NL80211_IFTYPE_WDS && 4411 vif->type != NL80211_IFTYPE_WDS &&
4403 conf->type != NL80211_IFTYPE_ADHOC) 4412 vif->type != NL80211_IFTYPE_ADHOC)
4404 return -EOPNOTSUPP; 4413 return -EOPNOTSUPP;
4405 4414
4406 mutex_lock(&wl->mutex); 4415 mutex_lock(&wl->mutex);
4407 if (wl->operating) 4416 if (wl->operating)
4408 goto out_mutex_unlock; 4417 goto out_mutex_unlock;
4409 4418
4410 b43dbg(wl, "Adding Interface type %d\n", conf->type); 4419 b43dbg(wl, "Adding Interface type %d\n", vif->type);
4411 4420
4412 dev = wl->current_dev; 4421 dev = wl->current_dev;
4413 wl->operating = 1; 4422 wl->operating = 1;
4414 wl->vif = conf->vif; 4423 wl->vif = vif;
4415 wl->if_type = conf->type; 4424 wl->if_type = vif->type;
4416 memcpy(wl->mac_addr, conf->mac_addr, ETH_ALEN); 4425 memcpy(wl->mac_addr, vif->addr, ETH_ALEN);
4417 4426
4418 b43_adjust_opmode(dev); 4427 b43_adjust_opmode(dev);
4419 b43_set_pretbtt(dev); 4428 b43_set_pretbtt(dev);
@@ -4428,17 +4437,17 @@ static int b43_op_add_interface(struct ieee80211_hw *hw,
4428} 4437}
4429 4438
4430static void b43_op_remove_interface(struct ieee80211_hw *hw, 4439static void b43_op_remove_interface(struct ieee80211_hw *hw,
4431 struct ieee80211_if_init_conf *conf) 4440 struct ieee80211_vif *vif)
4432{ 4441{
4433 struct b43_wl *wl = hw_to_b43_wl(hw); 4442 struct b43_wl *wl = hw_to_b43_wl(hw);
4434 struct b43_wldev *dev = wl->current_dev; 4443 struct b43_wldev *dev = wl->current_dev;
4435 4444
4436 b43dbg(wl, "Removing Interface type %d\n", conf->type); 4445 b43dbg(wl, "Removing Interface type %d\n", vif->type);
4437 4446
4438 mutex_lock(&wl->mutex); 4447 mutex_lock(&wl->mutex);
4439 4448
4440 B43_WARN_ON(!wl->operating); 4449 B43_WARN_ON(!wl->operating);
4441 B43_WARN_ON(wl->vif != conf->vif); 4450 B43_WARN_ON(wl->vif != vif);
4442 wl->vif = NULL; 4451 wl->vif = NULL;
4443 4452
4444 wl->operating = 0; 4453 wl->operating = 0;
diff --git a/drivers/net/wireless/b43/phy_lp.c b/drivers/net/wireless/b43/phy_lp.c
index 3e046ec1ff8..b58d6cf2658 100644
--- a/drivers/net/wireless/b43/phy_lp.c
+++ b/drivers/net/wireless/b43/phy_lp.c
@@ -80,6 +80,7 @@ static void b43_lpphy_op_free(struct b43_wldev *dev)
80 dev->phy.lp = NULL; 80 dev->phy.lp = NULL;
81} 81}
82 82
83/* http://bcm-v4.sipsolutions.net/802.11/PHY/LP/ReadBandSrom */
83static void lpphy_read_band_sprom(struct b43_wldev *dev) 84static void lpphy_read_band_sprom(struct b43_wldev *dev)
84{ 85{
85 struct b43_phy_lp *lpphy = dev->phy.lp; 86 struct b43_phy_lp *lpphy = dev->phy.lp;
@@ -101,6 +102,12 @@ static void lpphy_read_band_sprom(struct b43_wldev *dev)
101 maxpwr = bus->sprom.maxpwr_bg; 102 maxpwr = bus->sprom.maxpwr_bg;
102 lpphy->max_tx_pwr_med_band = maxpwr; 103 lpphy->max_tx_pwr_med_band = maxpwr;
103 cckpo = bus->sprom.cck2gpo; 104 cckpo = bus->sprom.cck2gpo;
105 /*
106 * We don't read SPROM's opo as specs say. On rev8 SPROMs
107 * opo == ofdm2gpo and we don't know any SSB with LP-PHY
108 * and SPROM rev below 8.
109 */
110 B43_WARN_ON(bus->sprom.revision < 8);
104 ofdmpo = bus->sprom.ofdm2gpo; 111 ofdmpo = bus->sprom.ofdm2gpo;
105 if (cckpo) { 112 if (cckpo) {
106 for (i = 0; i < 4; i++) { 113 for (i = 0; i < 4; i++) {
@@ -1703,19 +1710,6 @@ static const struct lpphy_rx_iq_comp lpphy_rev2plus_iq_comp = {
1703 .c0 = 0, 1710 .c0 = 0,
1704}; 1711};
1705 1712
1706static u8 lpphy_nbits(s32 val)
1707{
1708 u32 tmp = abs(val);
1709 u8 nbits = 0;
1710
1711 while (tmp != 0) {
1712 nbits++;
1713 tmp >>= 1;
1714 }
1715
1716 return nbits;
1717}
1718
1719static int lpphy_calc_rx_iq_comp(struct b43_wldev *dev, u16 samples) 1713static int lpphy_calc_rx_iq_comp(struct b43_wldev *dev, u16 samples)
1720{ 1714{
1721 struct lpphy_iq_est iq_est; 1715 struct lpphy_iq_est iq_est;
@@ -1742,8 +1736,8 @@ static int lpphy_calc_rx_iq_comp(struct b43_wldev *dev, u16 samples)
1742 goto out; 1736 goto out;
1743 } 1737 }
1744 1738
1745 prod_msb = lpphy_nbits(prod); 1739 prod_msb = fls(abs(prod));
1746 q_msb = lpphy_nbits(qpwr); 1740 q_msb = fls(abs(qpwr));
1747 tmp1 = prod_msb - 20; 1741 tmp1 = prod_msb - 20;
1748 1742
1749 if (tmp1 >= 0) { 1743 if (tmp1 >= 0) {
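The phy_lp.c cleanup replaces the open-coded lpphy_nbits() helper with fls(): for any non-negative value both return the number of significant bits, i.e. the 1-based index of the highest set bit (and 0 for 0), so fls(abs(x)) is a drop-in replacement. A standalone check of the equivalence; __builtin_clz stands in for the kernel's fls(), which maps to a single bit-scan instruction on most architectures:

#include <stdio.h>
#include <stdlib.h>

/* The removed helper, kept here only for comparison. */
static unsigned char nbits(int val)
{
        unsigned int tmp = abs(val);
        unsigned char n = 0;

        while (tmp != 0) {
                n++;
                tmp >>= 1;
        }
        return n;
}

/* User-space stand-in for the kernel's fls() (GCC/Clang builtin). */
static int my_fls(unsigned int x)
{
        return x ? 32 - __builtin_clz(x) : 0;
}

int main(void)
{
        int v;

        for (v = -70000; v <= 70000; v += 7)
                if (nbits(v) != my_fls(abs(v)))
                        printf("mismatch at %d\n", v);
        return 0;
}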
diff --git a/drivers/net/wireless/b43/phy_n.c b/drivers/net/wireless/b43/phy_n.c
index 992318a7807..4a817e3da16 100644
--- a/drivers/net/wireless/b43/phy_n.c
+++ b/drivers/net/wireless/b43/phy_n.c
@@ -28,7 +28,32 @@
28#include "b43.h" 28#include "b43.h"
29#include "phy_n.h" 29#include "phy_n.h"
30#include "tables_nphy.h" 30#include "tables_nphy.h"
31#include "main.h"
31 32
33struct nphy_txgains {
34 u16 txgm[2];
35 u16 pga[2];
36 u16 pad[2];
37 u16 ipa[2];
38};
39
40struct nphy_iqcal_params {
41 u16 txgm;
42 u16 pga;
43 u16 pad;
44 u16 ipa;
45 u16 cal_gain;
46 u16 ncorr[5];
47};
48
49struct nphy_iq_est {
50 s32 iq0_prod;
51 u32 i0_pwr;
52 u32 q0_pwr;
53 s32 iq1_prod;
54 u32 i1_pwr;
55 u32 q1_pwr;
56};
32 57
33void b43_nphy_set_rxantenna(struct b43_wldev *dev, int antenna) 58void b43_nphy_set_rxantenna(struct b43_wldev *dev, int antenna)
34{//TODO 59{//TODO
@@ -197,44 +222,16 @@ void b43_nphy_radio_turn_off(struct b43_wldev *dev)
197 ~B43_NPHY_RFCTL_CMD_EN); 222 ~B43_NPHY_RFCTL_CMD_EN);
198} 223}
199 224
200#define ntab_upload(dev, offset, data) do { \ 225/*
201 unsigned int i; \ 226 * Upload the N-PHY tables.
202 for (i = 0; i < (offset##_SIZE); i++) \ 227 * http://bcm-v4.sipsolutions.net/802.11/PHY/N/InitTables
203 b43_ntab_write(dev, (offset) + i, (data)[i]); \ 228 */
204 } while (0)
205
206/* Upload the N-PHY tables. */
207static void b43_nphy_tables_init(struct b43_wldev *dev) 229static void b43_nphy_tables_init(struct b43_wldev *dev)
208{ 230{
209 /* Static tables */ 231 if (dev->phy.rev < 3)
210 ntab_upload(dev, B43_NTAB_FRAMESTRUCT, b43_ntab_framestruct); 232 b43_nphy_rev0_1_2_tables_init(dev);
211 ntab_upload(dev, B43_NTAB_FRAMELT, b43_ntab_framelookup); 233 else
212 ntab_upload(dev, B43_NTAB_TMAP, b43_ntab_tmap); 234 b43_nphy_rev3plus_tables_init(dev);
213 ntab_upload(dev, B43_NTAB_TDTRN, b43_ntab_tdtrn);
214 ntab_upload(dev, B43_NTAB_INTLEVEL, b43_ntab_intlevel);
215 ntab_upload(dev, B43_NTAB_PILOT, b43_ntab_pilot);
216 ntab_upload(dev, B43_NTAB_PILOTLT, b43_ntab_pilotlt);
217 ntab_upload(dev, B43_NTAB_TDI20A0, b43_ntab_tdi20a0);
218 ntab_upload(dev, B43_NTAB_TDI20A1, b43_ntab_tdi20a1);
219 ntab_upload(dev, B43_NTAB_TDI40A0, b43_ntab_tdi40a0);
220 ntab_upload(dev, B43_NTAB_TDI40A1, b43_ntab_tdi40a1);
221 ntab_upload(dev, B43_NTAB_BDI, b43_ntab_bdi);
222 ntab_upload(dev, B43_NTAB_CHANEST, b43_ntab_channelest);
223 ntab_upload(dev, B43_NTAB_MCS, b43_ntab_mcs);
224
225 /* Volatile tables */
226 ntab_upload(dev, B43_NTAB_NOISEVAR10, b43_ntab_noisevar10);
227 ntab_upload(dev, B43_NTAB_NOISEVAR11, b43_ntab_noisevar11);
228 ntab_upload(dev, B43_NTAB_C0_ESTPLT, b43_ntab_estimatepowerlt0);
229 ntab_upload(dev, B43_NTAB_C1_ESTPLT, b43_ntab_estimatepowerlt1);
230 ntab_upload(dev, B43_NTAB_C0_ADJPLT, b43_ntab_adjustpower0);
231 ntab_upload(dev, B43_NTAB_C1_ADJPLT, b43_ntab_adjustpower1);
232 ntab_upload(dev, B43_NTAB_C0_GAINCTL, b43_ntab_gainctl0);
233 ntab_upload(dev, B43_NTAB_C1_GAINCTL, b43_ntab_gainctl1);
234 ntab_upload(dev, B43_NTAB_C0_IQLT, b43_ntab_iqlt0);
235 ntab_upload(dev, B43_NTAB_C1_IQLT, b43_ntab_iqlt1);
236 ntab_upload(dev, B43_NTAB_C0_LOFEEDTH, b43_ntab_loftlt0);
237 ntab_upload(dev, B43_NTAB_C1_LOFEEDTH, b43_ntab_loftlt1);
238} 235}
239 236
240static void b43_nphy_workarounds(struct b43_wldev *dev) 237static void b43_nphy_workarounds(struct b43_wldev *dev)
@@ -341,18 +338,386 @@ static void b43_nphy_workarounds(struct b43_wldev *dev)
341 b43_phy_write(dev, B43_NPHY_PHASETR_B2, 0x20); 338 b43_phy_write(dev, B43_NPHY_PHASETR_B2, 0x20);
342} 339}
343 340
341/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/PA%20override */
342static void b43_nphy_pa_override(struct b43_wldev *dev, bool enable)
343{
344 struct b43_phy_n *nphy = dev->phy.n;
345 enum ieee80211_band band;
346 u16 tmp;
347
348 if (!enable) {
349 nphy->rfctrl_intc1_save = b43_phy_read(dev,
350 B43_NPHY_RFCTL_INTC1);
351 nphy->rfctrl_intc2_save = b43_phy_read(dev,
352 B43_NPHY_RFCTL_INTC2);
353 band = b43_current_band(dev->wl);
354 if (dev->phy.rev >= 3) {
355 if (band == IEEE80211_BAND_5GHZ)
356 tmp = 0x600;
357 else
358 tmp = 0x480;
359 } else {
360 if (band == IEEE80211_BAND_5GHZ)
361 tmp = 0x180;
362 else
363 tmp = 0x120;
364 }
365 b43_phy_write(dev, B43_NPHY_RFCTL_INTC1, tmp);
366 b43_phy_write(dev, B43_NPHY_RFCTL_INTC2, tmp);
367 } else {
368 b43_phy_write(dev, B43_NPHY_RFCTL_INTC1,
369 nphy->rfctrl_intc1_save);
370 b43_phy_write(dev, B43_NPHY_RFCTL_INTC2,
371 nphy->rfctrl_intc2_save);
372 }
373}
374
375/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/TxLpFbw */
376static void b43_nphy_tx_lp_fbw(struct b43_wldev *dev)
377{
378 struct b43_phy_n *nphy = dev->phy.n;
379 u16 tmp;
380 enum ieee80211_band band = b43_current_band(dev->wl);
381 bool ipa = (nphy->ipa2g_on && band == IEEE80211_BAND_2GHZ) ||
382 (nphy->ipa5g_on && band == IEEE80211_BAND_5GHZ);
383
384 if (dev->phy.rev >= 3) {
385 if (ipa) {
386 tmp = 4;
387 b43_phy_write(dev, B43_NPHY_TXF_40CO_B32S2,
388 (((((tmp << 3) | tmp) << 3) | tmp) << 3) | tmp);
389 }
390
391 tmp = 1;
392 b43_phy_write(dev, B43_NPHY_TXF_40CO_B1S2,
393 (((((tmp << 3) | tmp) << 3) | tmp) << 3) | tmp);
394 }
395}
396
397/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/BmacPhyClkFgc */
398static void b43_nphy_bmac_clock_fgc(struct b43_wldev *dev, bool force)
399{
400 u32 tmslow;
401
402 if (dev->phy.type != B43_PHYTYPE_N)
403 return;
404
405 tmslow = ssb_read32(dev->dev, SSB_TMSLOW);
406 if (force)
407 tmslow |= SSB_TMSLOW_FGC;
408 else
409 tmslow &= ~SSB_TMSLOW_FGC;
410 ssb_write32(dev->dev, SSB_TMSLOW, tmslow);
411}
412
413/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/CCA */
344static void b43_nphy_reset_cca(struct b43_wldev *dev) 414static void b43_nphy_reset_cca(struct b43_wldev *dev)
345{ 415{
346 u16 bbcfg; 416 u16 bbcfg;
347 417
348 ssb_write32(dev->dev, SSB_TMSLOW, 418 b43_nphy_bmac_clock_fgc(dev, 1);
349 ssb_read32(dev->dev, SSB_TMSLOW) | SSB_TMSLOW_FGC);
350 bbcfg = b43_phy_read(dev, B43_NPHY_BBCFG); 419 bbcfg = b43_phy_read(dev, B43_NPHY_BBCFG);
351 b43_phy_set(dev, B43_NPHY_BBCFG, B43_NPHY_BBCFG_RSTCCA); 420 b43_phy_write(dev, B43_NPHY_BBCFG, bbcfg | B43_NPHY_BBCFG_RSTCCA);
352 b43_phy_write(dev, B43_NPHY_BBCFG, 421 udelay(1);
353 bbcfg & ~B43_NPHY_BBCFG_RSTCCA); 422 b43_phy_write(dev, B43_NPHY_BBCFG, bbcfg & ~B43_NPHY_BBCFG_RSTCCA);
354 ssb_write32(dev->dev, SSB_TMSLOW, 423 b43_nphy_bmac_clock_fgc(dev, 0);
355 ssb_read32(dev->dev, SSB_TMSLOW) & ~SSB_TMSLOW_FGC); 424 /* TODO: N PHY Force RF Seq with argument 2 */
425}
426
427/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/RxIqEst */
428static void b43_nphy_rx_iq_est(struct b43_wldev *dev, struct nphy_iq_est *est,
429 u16 samps, u8 time, bool wait)
430{
431 int i;
432 u16 tmp;
433
434 b43_phy_write(dev, B43_NPHY_IQEST_SAMCNT, samps);
435 b43_phy_maskset(dev, B43_NPHY_IQEST_WT, ~B43_NPHY_IQEST_WT_VAL, time);
436 if (wait)
437 b43_phy_set(dev, B43_NPHY_IQEST_CMD, B43_NPHY_IQEST_CMD_MODE);
438 else
439 b43_phy_mask(dev, B43_NPHY_IQEST_CMD, ~B43_NPHY_IQEST_CMD_MODE);
440
441 b43_phy_set(dev, B43_NPHY_IQEST_CMD, B43_NPHY_IQEST_CMD_START);
442
443 for (i = 1000; i; i--) {
444 tmp = b43_phy_read(dev, B43_NPHY_IQEST_CMD);
445 if (!(tmp & B43_NPHY_IQEST_CMD_START)) {
446 est->i0_pwr = (b43_phy_read(dev, B43_NPHY_IQEST_IPACC_HI0) << 16) |
447 b43_phy_read(dev, B43_NPHY_IQEST_IPACC_LO0);
448 est->q0_pwr = (b43_phy_read(dev, B43_NPHY_IQEST_QPACC_HI0) << 16) |
449 b43_phy_read(dev, B43_NPHY_IQEST_QPACC_LO0);
450 est->iq0_prod = (b43_phy_read(dev, B43_NPHY_IQEST_IQACC_HI0) << 16) |
451 b43_phy_read(dev, B43_NPHY_IQEST_IQACC_LO0);
452
453 est->i1_pwr = (b43_phy_read(dev, B43_NPHY_IQEST_IPACC_HI1) << 16) |
454 b43_phy_read(dev, B43_NPHY_IQEST_IPACC_LO1);
455 est->q1_pwr = (b43_phy_read(dev, B43_NPHY_IQEST_QPACC_HI1) << 16) |
456 b43_phy_read(dev, B43_NPHY_IQEST_QPACC_LO1);
457 est->iq1_prod = (b43_phy_read(dev, B43_NPHY_IQEST_IQACC_HI1) << 16) |
458 b43_phy_read(dev, B43_NPHY_IQEST_IQACC_LO1);
459 return;
460 }
461 udelay(10);
462 }
463 memset(est, 0, sizeof(*est));
464}
465
466/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/RxIqCoeffs */
467static void b43_nphy_rx_iq_coeffs(struct b43_wldev *dev, bool write,
468 struct b43_phy_n_iq_comp *pcomp)
469{
470 if (write) {
471 b43_phy_write(dev, B43_NPHY_C1_RXIQ_COMPA0, pcomp->a0);
472 b43_phy_write(dev, B43_NPHY_C1_RXIQ_COMPB0, pcomp->b0);
473 b43_phy_write(dev, B43_NPHY_C2_RXIQ_COMPA1, pcomp->a1);
474 b43_phy_write(dev, B43_NPHY_C2_RXIQ_COMPB1, pcomp->b1);
475 } else {
476 pcomp->a0 = b43_phy_read(dev, B43_NPHY_C1_RXIQ_COMPA0);
477 pcomp->b0 = b43_phy_read(dev, B43_NPHY_C1_RXIQ_COMPB0);
478 pcomp->a1 = b43_phy_read(dev, B43_NPHY_C2_RXIQ_COMPA1);
479 pcomp->b1 = b43_phy_read(dev, B43_NPHY_C2_RXIQ_COMPB1);
480 }
481}
482
483/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/CalcRxIqComp */
484static void b43_nphy_calc_rx_iq_comp(struct b43_wldev *dev, u8 mask)
485{
486 int i;
487 s32 iq;
488 u32 ii;
489 u32 qq;
490 int iq_nbits, qq_nbits;
491 int arsh, brsh;
492 u16 tmp, a, b;
493
494 struct nphy_iq_est est;
495 struct b43_phy_n_iq_comp old;
496 struct b43_phy_n_iq_comp new = { };
497 bool error = false;
498
499 if (mask == 0)
500 return;
501
502 b43_nphy_rx_iq_coeffs(dev, false, &old);
503 b43_nphy_rx_iq_coeffs(dev, true, &new);
504 b43_nphy_rx_iq_est(dev, &est, 0x4000, 32, false);
505 new = old;
506
507 for (i = 0; i < 2; i++) {
508 if (i == 0 && (mask & 1)) {
509 iq = est.iq0_prod;
510 ii = est.i0_pwr;
511 qq = est.q0_pwr;
512 } else if (i == 1 && (mask & 2)) {
513 iq = est.iq1_prod;
514 ii = est.i1_pwr;
515 qq = est.q1_pwr;
516 } else {
517 B43_WARN_ON(1);
518 continue;
519 }
520
521 if (ii + qq < 2) {
522 error = true;
523 break;
524 }
525
526 iq_nbits = fls(abs(iq));
527 qq_nbits = fls(qq);
528
529 arsh = iq_nbits - 20;
530 if (arsh >= 0) {
531 a = -((iq << (30 - iq_nbits)) + (ii >> (1 + arsh)));
532 tmp = ii >> arsh;
533 } else {
534 a = -((iq << (30 - iq_nbits)) + (ii << (-1 - arsh)));
535 tmp = ii << -arsh;
536 }
537 if (tmp == 0) {
538 error = true;
539 break;
540 }
541 a /= tmp;
542
543 brsh = qq_nbits - 11;
544 if (brsh >= 0) {
545 b = (qq << (31 - qq_nbits));
546 tmp = ii >> brsh;
547 } else {
548 b = (qq << (31 - qq_nbits));
549 tmp = ii << -brsh;
550 }
551 if (tmp == 0) {
552 error = true;
553 break;
554 }
555 b = int_sqrt(b / tmp - a * a) - (1 << 10);
556
557 if (i == 0 && (mask & 0x1)) {
558 if (dev->phy.rev >= 3) {
559 new.a0 = a & 0x3FF;
560 new.b0 = b & 0x3FF;
561 } else {
562 new.a0 = b & 0x3FF;
563 new.b0 = a & 0x3FF;
564 }
565 } else if (i == 1 && (mask & 0x2)) {
566 if (dev->phy.rev >= 3) {
567 new.a1 = a & 0x3FF;
568 new.b1 = b & 0x3FF;
569 } else {
570 new.a1 = b & 0x3FF;
571 new.b1 = a & 0x3FF;
572 }
573 }
574 }
575
576 if (error)
577 new = old;
578
579 b43_nphy_rx_iq_coeffs(dev, true, &new);
580}
581
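One plausible reading of the fixed-point arithmetic in b43_nphy_calc_rx_iq_comp() above (an interpretation of the code, not something stated in the patch): with II, QQ and IQ being the accumulated i^2, q^2 and i*q terms from the estimate block, the Q.10 correction coefficients work out to roughly

        a ~= -1024 * IQ / II
        b ~=  1024 * (sqrt(QQ / II - (a / 1024)^2) - 1)

so "a" cancels the measured I/Q cross-correlation and "b" trims the Q rail until both rails carry equal power; the fls()-based shift amounts appear to exist only to keep the intermediate products within 32 bits.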
582/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/TxIqWar */
583static void b43_nphy_tx_iq_workaround(struct b43_wldev *dev)
584{
585 u16 array[4];
586 int i;
587
588 b43_phy_write(dev, B43_NPHY_TABLE_ADDR, 0x3C50);
589 for (i = 0; i < 4; i++)
590 array[i] = b43_phy_read(dev, B43_NPHY_TABLE_DATALO);
591
592 b43_shm_write16(dev, B43_SHM_SHARED, B43_SHM_SH_NPHY_TXIQW0, array[0]);
593 b43_shm_write16(dev, B43_SHM_SHARED, B43_SHM_SH_NPHY_TXIQW1, array[1]);
594 b43_shm_write16(dev, B43_SHM_SHARED, B43_SHM_SH_NPHY_TXIQW2, array[2]);
595 b43_shm_write16(dev, B43_SHM_SHARED, B43_SHM_SH_NPHY_TXIQW3, array[3]);
596}
597
598/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/clip-detection */
599static void b43_nphy_write_clip_detection(struct b43_wldev *dev, u16 *clip_st)
600{
601 b43_phy_write(dev, B43_NPHY_C1_CLIP1THRES, clip_st[0]);
602 b43_phy_write(dev, B43_NPHY_C2_CLIP1THRES, clip_st[1]);
603}
604
605/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/clip-detection */
606static void b43_nphy_read_clip_detection(struct b43_wldev *dev, u16 *clip_st)
607{
608 clip_st[0] = b43_phy_read(dev, B43_NPHY_C1_CLIP1THRES);
609 clip_st[1] = b43_phy_read(dev, B43_NPHY_C2_CLIP1THRES);
610}
611
612/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/classifier */
613static u16 b43_nphy_classifier(struct b43_wldev *dev, u16 mask, u16 val)
614{
615 u16 tmp;
616
617 if (dev->dev->id.revision == 16)
618 b43_mac_suspend(dev);
619
620 tmp = b43_phy_read(dev, B43_NPHY_CLASSCTL);
621 tmp &= (B43_NPHY_CLASSCTL_CCKEN | B43_NPHY_CLASSCTL_OFDMEN |
622 B43_NPHY_CLASSCTL_WAITEDEN);
623 tmp &= ~mask;
624 tmp |= (val & mask);
625 b43_phy_maskset(dev, B43_NPHY_CLASSCTL, 0xFFF8, tmp);
626
627 if (dev->dev->id.revision == 16)
628 b43_mac_enable(dev);
629
630 return tmp;
631}
632
633/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/carriersearch */
634static void b43_nphy_stay_in_carrier_search(struct b43_wldev *dev, bool enable)
635{
636 struct b43_phy *phy = &dev->phy;
637 struct b43_phy_n *nphy = phy->n;
638
639 if (enable) {
640 u16 clip[] = { 0xFFFF, 0xFFFF };
641 if (nphy->deaf_count++ == 0) {
642 nphy->classifier_state = b43_nphy_classifier(dev, 0, 0);
643 b43_nphy_classifier(dev, 0x7, 0);
644 b43_nphy_read_clip_detection(dev, nphy->clip_state);
645 b43_nphy_write_clip_detection(dev, clip);
646 }
647 b43_nphy_reset_cca(dev);
648 } else {
649 if (--nphy->deaf_count == 0) {
650 b43_nphy_classifier(dev, 0x7, nphy->classifier_state);
651 b43_nphy_write_clip_detection(dev, nphy->clip_state);
652 }
653 }
654}
655
656/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/TxPwrCtrlCoefSetup */
657static void b43_nphy_tx_pwr_ctrl_coef_setup(struct b43_wldev *dev)
658{
659 struct b43_phy_n *nphy = dev->phy.n;
660 int i, j;
661 u32 tmp;
662 u32 cur_real, cur_imag, real_part, imag_part;
663
664 u16 buffer[7];
665
666 if (nphy->hang_avoid)
667 b43_nphy_stay_in_carrier_search(dev, true);
668
669 /* TODO: Read an N PHY Table with ID 15, length 7, offset 80,
670 width 16, and data pointer buffer */
671
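	/* For each core, fill 128 entries starting at offset 320 of
	 * N PHY table 26 (core 0) / 27 (core 1) with that core's packed
	 * coefficient pair. */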
672 for (i = 0; i < 2; i++) {
673 tmp = ((buffer[i * 2] & 0x3FF) << 10) |
674 (buffer[i * 2 + 1] & 0x3FF);
675 b43_phy_write(dev, B43_NPHY_TABLE_ADDR,
676 (((i + 26) << 10) | 320));
677 for (j = 0; j < 128; j++) {
678 b43_phy_write(dev, B43_NPHY_TABLE_DATAHI,
679 ((tmp >> 16) & 0xFFFF));
680 b43_phy_write(dev, B43_NPHY_TABLE_DATALO,
681 (tmp & 0xFFFF));
682 }
683 }
684
685 for (i = 0; i < 2; i++) {
686 tmp = buffer[5 + i];
687 real_part = (tmp >> 8) & 0xFF;
688 imag_part = (tmp & 0xFF);
689 b43_phy_write(dev, B43_NPHY_TABLE_ADDR,
690 (((i + 26) << 10) | 448));
691
692 if (dev->phy.rev >= 3) {
693 cur_real = real_part;
694 cur_imag = imag_part;
695 tmp = ((cur_real & 0xFF) << 8) | (cur_imag & 0xFF);
696 }
697
698 for (j = 0; j < 128; j++) {
699 if (dev->phy.rev < 3) {
700 cur_real = (real_part * loscale[j] + 128) >> 8;
701 cur_imag = (imag_part * loscale[j] + 128) >> 8;
702 tmp = ((cur_real & 0xFF) << 8) |
703 (cur_imag & 0xFF);
704 }
705 b43_phy_write(dev, B43_NPHY_TABLE_DATAHI,
706 ((tmp >> 16) & 0xFFFF));
707 b43_phy_write(dev, B43_NPHY_TABLE_DATALO,
708 (tmp & 0xFFFF));
709 }
710 }
711
712 if (dev->phy.rev >= 3) {
713 b43_shm_write16(dev, B43_SHM_SHARED,
714 B43_SHM_SH_NPHY_TXPWR_INDX0, 0xFFFF);
715 b43_shm_write16(dev, B43_SHM_SHARED,
716 B43_SHM_SH_NPHY_TXPWR_INDX1, 0xFFFF);
717 }
718
719 if (nphy->hang_avoid)
720 b43_nphy_stay_in_carrier_search(dev, false);
356} 721}
357 722
358enum b43_nphy_rf_sequence { 723enum b43_nphy_rf_sequence {
@@ -411,81 +776,1339 @@ static void b43_nphy_bphy_init(struct b43_wldev *dev)
411 b43_phy_write(dev, B43_PHY_N_BMODE(0x38), 0x668); 776 b43_phy_write(dev, B43_PHY_N_BMODE(0x38), 0x668);
412} 777}
413 778
414/* RSSI Calibration */ 779/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/ScaleOffsetRssi */
415static void b43_nphy_rssi_cal(struct b43_wldev *dev, u8 type) 780static void b43_nphy_scale_offset_rssi(struct b43_wldev *dev, u16 scale,
781 s8 offset, u8 core, u8 rail, u8 type)
416{ 782{
417 //TODO 783 u16 tmp;
784 bool core1or5 = (core == 1) || (core == 5);
785 bool core2or5 = (core == 2) || (core == 5);
786
787 offset = clamp_val(offset, -32, 31);
788 tmp = ((scale & 0x3F) << 8) | (offset & 0x3F);
789
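	/* Write the packed scale/offset word to the RSSIMC register
	 * selected by core, rail and RSSI type. */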
790 if (core1or5 && (rail == 0) && (type == 2))
791 b43_phy_write(dev, B43_NPHY_RSSIMC_0I_RSSI_Z, tmp);
792 if (core1or5 && (rail == 1) && (type == 2))
793 b43_phy_write(dev, B43_NPHY_RSSIMC_0Q_RSSI_Z, tmp);
794 if (core2or5 && (rail == 0) && (type == 2))
795 b43_phy_write(dev, B43_NPHY_RSSIMC_1I_RSSI_Z, tmp);
796 if (core2or5 && (rail == 1) && (type == 2))
797 b43_phy_write(dev, B43_NPHY_RSSIMC_1Q_RSSI_Z, tmp);
798 if (core1or5 && (rail == 0) && (type == 0))
799 b43_phy_write(dev, B43_NPHY_RSSIMC_0I_RSSI_X, tmp);
800 if (core1or5 && (rail == 1) && (type == 0))
801 b43_phy_write(dev, B43_NPHY_RSSIMC_0Q_RSSI_X, tmp);
802 if (core2or5 && (rail == 0) && (type == 0))
803 b43_phy_write(dev, B43_NPHY_RSSIMC_1I_RSSI_X, tmp);
804 if (core2or5 && (rail == 1) && (type == 0))
805 b43_phy_write(dev, B43_NPHY_RSSIMC_1Q_RSSI_X, tmp);
806 if (core1or5 && (rail == 0) && (type == 1))
807 b43_phy_write(dev, B43_NPHY_RSSIMC_0I_RSSI_Y, tmp);
808 if (core1or5 && (rail == 1) && (type == 1))
809 b43_phy_write(dev, B43_NPHY_RSSIMC_0Q_RSSI_Y, tmp);
810 if (core2or5 && (rail == 0) && (type == 1))
811 b43_phy_write(dev, B43_NPHY_RSSIMC_1I_RSSI_Y, tmp);
812 if (core2or5 && (rail == 1) && (type == 1))
813 b43_phy_write(dev, B43_NPHY_RSSIMC_1Q_RSSI_Y, tmp);
814 if (core1or5 && (rail == 0) && (type == 6))
815 b43_phy_write(dev, B43_NPHY_RSSIMC_0I_TBD, tmp);
816 if (core1or5 && (rail == 1) && (type == 6))
817 b43_phy_write(dev, B43_NPHY_RSSIMC_0Q_TBD, tmp);
818 if (core2or5 && (rail == 0) && (type == 6))
819 b43_phy_write(dev, B43_NPHY_RSSIMC_1I_TBD, tmp);
820 if (core2or5 && (rail == 1) && (type == 6))
821 b43_phy_write(dev, B43_NPHY_RSSIMC_1Q_TBD, tmp);
822 if (core1or5 && (rail == 0) && (type == 3))
823 b43_phy_write(dev, B43_NPHY_RSSIMC_0I_PWRDET, tmp);
824 if (core1or5 && (rail == 1) && (type == 3))
825 b43_phy_write(dev, B43_NPHY_RSSIMC_0Q_PWRDET, tmp);
826 if (core2or5 && (rail == 0) && (type == 3))
827 b43_phy_write(dev, B43_NPHY_RSSIMC_1I_PWRDET, tmp);
828 if (core2or5 && (rail == 1) && (type == 3))
829 b43_phy_write(dev, B43_NPHY_RSSIMC_1Q_PWRDET, tmp);
830 if (core1or5 && (type == 4))
831 b43_phy_write(dev, B43_NPHY_RSSIMC_0I_TSSI, tmp);
832 if (core2or5 && (type == 4))
833 b43_phy_write(dev, B43_NPHY_RSSIMC_1I_TSSI, tmp);
834 if (core1or5 && (type == 5))
835 b43_phy_write(dev, B43_NPHY_RSSIMC_0Q_TSSI, tmp);
836 if (core2or5 && (type == 5))
837 b43_phy_write(dev, B43_NPHY_RSSIMC_1Q_TSSI, tmp);
838}
839
840/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/RSSISel */
841static void b43_nphy_rssi_select(struct b43_wldev *dev, u8 code, u8 type)
842{
843 u16 val;
844
845 if (dev->phy.rev >= 3) {
846 /* TODO */
847 } else {
848 if (type < 3)
849 val = 0;
850 else if (type == 6)
851 val = 1;
852 else if (type == 3)
853 val = 2;
854 else
855 val = 3;
856
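		/* Put the 2-bit RSSI source code into bits 12-13 and 14-15
		 * of both per-core AFE control registers. */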
857 val = (val << 12) | (val << 14);
858 b43_phy_maskset(dev, B43_NPHY_AFECTL_C1, 0x0FFF, val);
859 b43_phy_maskset(dev, B43_NPHY_AFECTL_C2, 0x0FFF, val);
860
861 if (type < 3) {
862 b43_phy_maskset(dev, B43_NPHY_RFCTL_RSSIO1, 0xFFCF,
863 (type + 1) << 4);
864 b43_phy_maskset(dev, B43_NPHY_RFCTL_RSSIO2, 0xFFCF,
865 (type + 1) << 4);
866 }
867
868 /* TODO use some definitions */
869 if (code == 0) {
870 b43_phy_maskset(dev, B43_NPHY_AFECTL_OVER, 0xCFFF, 0);
871 if (type < 3) {
872 b43_phy_maskset(dev, B43_NPHY_RFCTL_CMD,
873 0xFEC7, 0);
874 b43_phy_maskset(dev, B43_NPHY_RFCTL_OVER,
875 0xEFDC, 0);
876 b43_phy_maskset(dev, B43_NPHY_RFCTL_CMD,
877 0xFFFE, 0);
878 udelay(20);
879 b43_phy_maskset(dev, B43_NPHY_RFCTL_OVER,
880 0xFFFE, 0);
881 }
882 } else {
883 b43_phy_maskset(dev, B43_NPHY_AFECTL_OVER, 0xCFFF,
884 0x3000);
885 if (type < 3) {
886 b43_phy_maskset(dev, B43_NPHY_RFCTL_CMD,
887 0xFEC7, 0x0180);
888 b43_phy_maskset(dev, B43_NPHY_RFCTL_OVER,
889 0xEFDC, (code << 1 | 0x1021));
890 b43_phy_maskset(dev, B43_NPHY_RFCTL_CMD,
891 0xFFFE, 0x0001);
892 udelay(20);
893 b43_phy_maskset(dev, B43_NPHY_RFCTL_OVER,
894 0xFFFE, 0);
895 }
896 }
897 }
898}
899
900/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/SetRssi2055Vcm */
901static void b43_nphy_set_rssi_2055_vcm(struct b43_wldev *dev, u8 type, u8 *buf)
902{
903 int i;
904 for (i = 0; i < 2; i++) {
905 if (type == 2) {
906 if (i == 0) {
907 b43_radio_maskset(dev, B2055_C1_B0NB_RSSIVCM,
908 0xFC, buf[0]);
909 b43_radio_maskset(dev, B2055_C1_RX_BB_RSSICTL5,
910 0xFC, buf[1]);
911 } else {
912 b43_radio_maskset(dev, B2055_C2_B0NB_RSSIVCM,
913 0xFC, buf[2 * i]);
914 b43_radio_maskset(dev, B2055_C2_RX_BB_RSSICTL5,
915 0xFC, buf[2 * i + 1]);
916 }
917 } else {
918 if (i == 0)
919 b43_radio_maskset(dev, B2055_C1_RX_BB_RSSICTL5,
920 0xF3, buf[0] << 2);
921 else
922 b43_radio_maskset(dev, B2055_C2_RX_BB_RSSICTL5,
923 0xF3, buf[2 * i + 1] << 2);
924 }
925 }
926}
927
928/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/PollRssi */
929static int b43_nphy_poll_rssi(struct b43_wldev *dev, u8 type, s32 *buf,
930 u8 nsamp)
931{
932 int i;
933 int out;
934 u16 save_regs_phy[9];
935 u16 s[2];
936
937 if (dev->phy.rev >= 3) {
938 save_regs_phy[0] = b43_phy_read(dev,
939 B43_NPHY_RFCTL_LUT_TRSW_UP1);
940 save_regs_phy[1] = b43_phy_read(dev,
941 B43_NPHY_RFCTL_LUT_TRSW_UP2);
942 save_regs_phy[2] = b43_phy_read(dev, B43_NPHY_AFECTL_C1);
943 save_regs_phy[3] = b43_phy_read(dev, B43_NPHY_AFECTL_C2);
944 save_regs_phy[4] = b43_phy_read(dev, B43_NPHY_AFECTL_OVER1);
945 save_regs_phy[5] = b43_phy_read(dev, B43_NPHY_AFECTL_OVER);
946 save_regs_phy[6] = b43_phy_read(dev, B43_NPHY_TXF_40CO_B1S0);
947 save_regs_phy[7] = b43_phy_read(dev, B43_NPHY_TXF_40CO_B32S1);
948 }
949
950 b43_nphy_rssi_select(dev, 5, type);
951
952 if (dev->phy.rev < 2) {
953 save_regs_phy[8] = b43_phy_read(dev, B43_NPHY_GPIO_SEL);
954 b43_phy_write(dev, B43_NPHY_GPIO_SEL, 5);
955 }
956
957 for (i = 0; i < 4; i++)
958 buf[i] = 0;
959
960 for (i = 0; i < nsamp; i++) {
961 if (dev->phy.rev < 2) {
962 s[0] = b43_phy_read(dev, B43_NPHY_GPIO_LOOUT);
963 s[1] = b43_phy_read(dev, B43_NPHY_GPIO_HIOUT);
964 } else {
965 s[0] = b43_phy_read(dev, B43_NPHY_RSSI1);
966 s[1] = b43_phy_read(dev, B43_NPHY_RSSI2);
967 }
968
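			/* Each status word carries two 6-bit signed RSSI
			 * samples; shifting into an s8 and back sign-extends
			 * them before accumulation. */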
969 buf[0] += ((s8)((s[0] & 0x3F) << 2)) >> 2;
970 buf[1] += ((s8)(((s[0] >> 8) & 0x3F) << 2)) >> 2;
971 buf[2] += ((s8)((s[1] & 0x3F) << 2)) >> 2;
972 buf[3] += ((s8)(((s[1] >> 8) & 0x3F) << 2)) >> 2;
973 }
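	/* Pack the low byte of each accumulated result into the 32-bit
	 * return value. */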
974 out = (buf[0] & 0xFF) << 24 | (buf[1] & 0xFF) << 16 |
975 (buf[2] & 0xFF) << 8 | (buf[3] & 0xFF);
976
977 if (dev->phy.rev < 2)
978 b43_phy_write(dev, B43_NPHY_GPIO_SEL, save_regs_phy[8]);
979
980 if (dev->phy.rev >= 3) {
981 b43_phy_write(dev, B43_NPHY_RFCTL_LUT_TRSW_UP1,
982 save_regs_phy[0]);
983 b43_phy_write(dev, B43_NPHY_RFCTL_LUT_TRSW_UP2,
984 save_regs_phy[1]);
985 b43_phy_write(dev, B43_NPHY_AFECTL_C1, save_regs_phy[2]);
986 b43_phy_write(dev, B43_NPHY_AFECTL_C2, save_regs_phy[3]);
987 b43_phy_write(dev, B43_NPHY_AFECTL_OVER1, save_regs_phy[4]);
988 b43_phy_write(dev, B43_NPHY_AFECTL_OVER, save_regs_phy[5]);
989 b43_phy_write(dev, B43_NPHY_TXF_40CO_B1S0, save_regs_phy[6]);
990 b43_phy_write(dev, B43_NPHY_TXF_40CO_B32S1, save_regs_phy[7]);
991 }
992
993 return out;
994}
995
996/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/RSSICal */
997static void b43_nphy_rev2_rssi_cal(struct b43_wldev *dev, u8 type)
998{
999 int i, j;
1000 u8 state[4];
1001 u8 code, val;
1002 u16 class, override;
1003 u8 regs_save_radio[2];
1004 u16 regs_save_phy[2];
1005 s8 offset[4];
1006
1007 u16 clip_state[2];
1008 u16 clip_off[2] = { 0xFFFF, 0xFFFF };
1009 s32 results_min[4] = { };
1010 u8 vcm_final[4] = { };
1011 s32 results[4][4] = { };
1012 s32 miniq[4][2] = { };
1013
1014 if (type == 2) {
1015 code = 0;
1016 val = 6;
1017 } else if (type < 2) {
1018 code = 25;
1019 val = 4;
1020 } else {
1021 B43_WARN_ON(1);
1022 return;
1023 }
1024
1025 class = b43_nphy_classifier(dev, 0, 0);
1026 b43_nphy_classifier(dev, 7, 4);
1027 b43_nphy_read_clip_detection(dev, clip_state);
1028 b43_nphy_write_clip_detection(dev, clip_off);
1029
1030 if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ)
1031 override = 0x140;
1032 else
1033 override = 0x110;
1034
1035 regs_save_phy[0] = b43_phy_read(dev, B43_NPHY_RFCTL_INTC1);
1036 regs_save_radio[0] = b43_radio_read16(dev, B2055_C1_PD_RXTX);
1037 b43_phy_write(dev, B43_NPHY_RFCTL_INTC1, override);
1038 b43_radio_write16(dev, B2055_C1_PD_RXTX, val);
1039
1040 regs_save_phy[1] = b43_phy_read(dev, B43_NPHY_RFCTL_INTC2);
1041 regs_save_radio[1] = b43_radio_read16(dev, B2055_C2_PD_RXTX);
1042 b43_phy_write(dev, B43_NPHY_RFCTL_INTC2, override);
1043 b43_radio_write16(dev, B2055_C2_PD_RXTX, val);
1044
1045 state[0] = b43_radio_read16(dev, B2055_C1_PD_RSSIMISC) & 0x07;
1046 state[1] = b43_radio_read16(dev, B2055_C2_PD_RSSIMISC) & 0x07;
1047 b43_radio_mask(dev, B2055_C1_PD_RSSIMISC, 0xF8);
1048 b43_radio_mask(dev, B2055_C2_PD_RSSIMISC, 0xF8);
1049 state[2] = b43_radio_read16(dev, B2055_C1_SP_RSSI) & 0x07;
1050 state[3] = b43_radio_read16(dev, B2055_C2_SP_RSSI) & 0x07;
1051
1052 b43_nphy_rssi_select(dev, 5, type);
1053 b43_nphy_scale_offset_rssi(dev, 0, 0, 5, 0, type);
1054 b43_nphy_scale_offset_rssi(dev, 0, 0, 5, 1, type);
1055
1056 for (i = 0; i < 4; i++) {
1057 u8 tmp[4];
1058 for (j = 0; j < 4; j++)
1059 tmp[j] = i;
1060 if (type != 1)
1061 b43_nphy_set_rssi_2055_vcm(dev, type, tmp);
1062 b43_nphy_poll_rssi(dev, type, results[i], 8);
1063 if (type < 2)
1064 for (j = 0; j < 2; j++)
1065 miniq[i][j] = min(results[i][2 * j],
1066 results[i][2 * j + 1]);
1067 }
1068
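	/* For each of the four readings, pick the VCM setting whose result
	 * lies closest to the target (code * 8); the smallest raw reading
	 * is kept for the offset fixup below. */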
1069 for (i = 0; i < 4; i++) {
1070 s32 mind = 40;
1071 u8 minvcm = 0;
1072 s32 minpoll = 249;
1073 s32 curr;
1074 for (j = 0; j < 4; j++) {
1075 if (type == 2)
1076 curr = abs(results[j][i]);
1077 else
1078 curr = abs(miniq[j][i / 2] - code * 8);
1079
1080 if (curr < mind) {
1081 mind = curr;
1082 minvcm = j;
1083 }
1084
1085 if (results[j][i] < minpoll)
1086 minpoll = results[j][i];
1087 }
1088 results_min[i] = minpoll;
1089 vcm_final[i] = minvcm;
1090 }
1091
1092 if (type != 1)
1093 b43_nphy_set_rssi_2055_vcm(dev, type, vcm_final);
1094
1095 for (i = 0; i < 4; i++) {
1096 offset[i] = (code * 8) - results[vcm_final[i]][i];
1097
1098 if (offset[i] < 0)
1099 offset[i] = -((abs(offset[i]) + 4) / 8);
1100 else
1101 offset[i] = (offset[i] + 4) / 8;
1102
1103 if (results_min[i] == 248)
1104 offset[i] = code - 32;
1105
1106 if (i % 2 == 0)
1107 b43_nphy_scale_offset_rssi(dev, 0, offset[i], 1, 0,
1108 type);
1109 else
1110 b43_nphy_scale_offset_rssi(dev, 0, offset[i], 2, 1,
1111 type);
1112 }
1113
1114 b43_radio_maskset(dev, B2055_C1_PD_RSSIMISC, 0xF8, state[0]);
1115	b43_radio_maskset(dev, B2055_C2_PD_RSSIMISC, 0xF8, state[1]);
1116
1117 switch (state[2]) {
1118 case 1:
1119 b43_nphy_rssi_select(dev, 1, 2);
1120 break;
1121 case 4:
1122 b43_nphy_rssi_select(dev, 1, 0);
1123 break;
1124 case 2:
1125 b43_nphy_rssi_select(dev, 1, 1);
1126 break;
1127 default:
1128 b43_nphy_rssi_select(dev, 1, 1);
1129 break;
1130 }
1131
1132 switch (state[3]) {
1133 case 1:
1134 b43_nphy_rssi_select(dev, 2, 2);
1135 break;
1136 case 4:
1137 b43_nphy_rssi_select(dev, 2, 0);
1138 break;
1139 default:
1140 b43_nphy_rssi_select(dev, 2, 1);
1141 break;
1142 }
1143
1144 b43_nphy_rssi_select(dev, 0, type);
1145
1146 b43_phy_write(dev, B43_NPHY_RFCTL_INTC1, regs_save_phy[0]);
1147 b43_radio_write16(dev, B2055_C1_PD_RXTX, regs_save_radio[0]);
1148 b43_phy_write(dev, B43_NPHY_RFCTL_INTC2, regs_save_phy[1]);
1149 b43_radio_write16(dev, B2055_C2_PD_RXTX, regs_save_radio[1]);
1150
1151 b43_nphy_classifier(dev, 7, class);
1152 b43_nphy_write_clip_detection(dev, clip_state);
1153}
1154
1155/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/RSSICalRev3 */
1156static void b43_nphy_rev3_rssi_cal(struct b43_wldev *dev)
1157{
1158 /* TODO */
1159}
1160
1161/*
1162 * RSSI Calibration
1163 * http://bcm-v4.sipsolutions.net/802.11/PHY/N/RSSICal
1164 */
1165static void b43_nphy_rssi_cal(struct b43_wldev *dev)
1166{
1167 if (dev->phy.rev >= 3) {
1168 b43_nphy_rev3_rssi_cal(dev);
1169 } else {
1170 b43_nphy_rev2_rssi_cal(dev, 2);
1171 b43_nphy_rev2_rssi_cal(dev, 0);
1172 b43_nphy_rev2_rssi_cal(dev, 1);
1173 }
1174}
1175
1176/*
1177 * Restore RSSI Calibration
1178 * http://bcm-v4.sipsolutions.net/802.11/PHY/N/RestoreRssiCal
1179 */
1180static void b43_nphy_restore_rssi_cal(struct b43_wldev *dev)
1181{
1182 struct b43_phy_n *nphy = dev->phy.n;
1183
1184 u16 *rssical_radio_regs = NULL;
1185 u16 *rssical_phy_regs = NULL;
1186
1187 if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
1188 if (!nphy->rssical_chanspec_2G)
1189 return;
1190 rssical_radio_regs = nphy->rssical_cache.rssical_radio_regs_2G;
1191 rssical_phy_regs = nphy->rssical_cache.rssical_phy_regs_2G;
1192 } else {
1193 if (!nphy->rssical_chanspec_5G)
1194 return;
1195 rssical_radio_regs = nphy->rssical_cache.rssical_radio_regs_5G;
1196 rssical_phy_regs = nphy->rssical_cache.rssical_phy_regs_5G;
1197 }
1198
1199 /* TODO use some definitions */
1200 b43_radio_maskset(dev, 0x602B, 0xE3, rssical_radio_regs[0]);
1201 b43_radio_maskset(dev, 0x702B, 0xE3, rssical_radio_regs[1]);
1202
1203 b43_phy_write(dev, B43_NPHY_RSSIMC_0I_RSSI_Z, rssical_phy_regs[0]);
1204 b43_phy_write(dev, B43_NPHY_RSSIMC_0Q_RSSI_Z, rssical_phy_regs[1]);
1205 b43_phy_write(dev, B43_NPHY_RSSIMC_1I_RSSI_Z, rssical_phy_regs[2]);
1206 b43_phy_write(dev, B43_NPHY_RSSIMC_1Q_RSSI_Z, rssical_phy_regs[3]);
1207
1208 b43_phy_write(dev, B43_NPHY_RSSIMC_0I_RSSI_X, rssical_phy_regs[4]);
1209 b43_phy_write(dev, B43_NPHY_RSSIMC_0Q_RSSI_X, rssical_phy_regs[5]);
1210 b43_phy_write(dev, B43_NPHY_RSSIMC_1I_RSSI_X, rssical_phy_regs[6]);
1211 b43_phy_write(dev, B43_NPHY_RSSIMC_1Q_RSSI_X, rssical_phy_regs[7]);
1212
1213 b43_phy_write(dev, B43_NPHY_RSSIMC_0I_RSSI_Y, rssical_phy_regs[8]);
1214 b43_phy_write(dev, B43_NPHY_RSSIMC_0Q_RSSI_Y, rssical_phy_regs[9]);
1215 b43_phy_write(dev, B43_NPHY_RSSIMC_1I_RSSI_Y, rssical_phy_regs[10]);
1216 b43_phy_write(dev, B43_NPHY_RSSIMC_1Q_RSSI_Y, rssical_phy_regs[11]);
1217}
1218
1219/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/GetIpaGainTbl */
1220static const u32 *b43_nphy_get_ipa_gain_table(struct b43_wldev *dev)
1221{
1222 if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
1223 if (dev->phy.rev >= 6) {
1224 /* TODO If the chip is 47162
1225 return txpwrctrl_tx_gain_ipa_rev5 */
1226 return txpwrctrl_tx_gain_ipa_rev6;
1227 } else if (dev->phy.rev >= 5) {
1228 return txpwrctrl_tx_gain_ipa_rev5;
1229 } else {
1230 return txpwrctrl_tx_gain_ipa;
1231 }
1232 } else {
1233 return txpwrctrl_tx_gain_ipa_5g;
1234 }
1235}
1236
1237/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/TxCalRadioSetup */
1238static void b43_nphy_tx_cal_radio_setup(struct b43_wldev *dev)
1239{
1240 struct b43_phy_n *nphy = dev->phy.n;
1241 u16 *save = nphy->tx_rx_cal_radio_saveregs;
1242
1243 if (dev->phy.rev >= 3) {
1244 /* TODO */
1245 } else {
1246 save[0] = b43_radio_read16(dev, B2055_C1_TX_RF_IQCAL1);
1247 b43_radio_write16(dev, B2055_C1_TX_RF_IQCAL1, 0x29);
1248
1249 save[1] = b43_radio_read16(dev, B2055_C1_TX_RF_IQCAL2);
1250 b43_radio_write16(dev, B2055_C1_TX_RF_IQCAL2, 0x54);
1251
1252 save[2] = b43_radio_read16(dev, B2055_C2_TX_RF_IQCAL1);
1253 b43_radio_write16(dev, B2055_C2_TX_RF_IQCAL1, 0x29);
1254
1255 save[3] = b43_radio_read16(dev, B2055_C2_TX_RF_IQCAL2);
1256 b43_radio_write16(dev, B2055_C2_TX_RF_IQCAL2, 0x54);
1257
1258		save[4] = b43_radio_read16(dev, B2055_C1_PWRDET_RXTX);
1259		save[5] = b43_radio_read16(dev, B2055_C2_PWRDET_RXTX);
1260
1261 if (!(b43_phy_read(dev, B43_NPHY_BANDCTL) &
1262 B43_NPHY_BANDCTL_5GHZ)) {
1263 b43_radio_write16(dev, B2055_C1_PWRDET_RXTX, 0x04);
1264 b43_radio_write16(dev, B2055_C2_PWRDET_RXTX, 0x04);
1265 } else {
1266 b43_radio_write16(dev, B2055_C1_PWRDET_RXTX, 0x20);
1267 b43_radio_write16(dev, B2055_C2_PWRDET_RXTX, 0x20);
1268 }
1269
1270 if (dev->phy.rev < 2) {
1271 b43_radio_set(dev, B2055_C1_TX_BB_MXGM, 0x20);
1272 b43_radio_set(dev, B2055_C2_TX_BB_MXGM, 0x20);
1273 } else {
1274 b43_radio_mask(dev, B2055_C1_TX_BB_MXGM, ~0x20);
1275 b43_radio_mask(dev, B2055_C2_TX_BB_MXGM, ~0x20);
1276 }
1277 }
1278}
1279
1280/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/IqCalGainParams */
1281static void b43_nphy_iq_cal_gain_params(struct b43_wldev *dev, u16 core,
1282 struct nphy_txgains target,
1283 struct nphy_iqcal_params *params)
1284{
1285 int i, j, indx;
1286 u16 gain;
1287
1288 if (dev->phy.rev >= 3) {
1289 params->txgm = target.txgm[core];
1290 params->pga = target.pga[core];
1291 params->pad = target.pad[core];
1292 params->ipa = target.ipa[core];
1293 params->cal_gain = (params->txgm << 12) | (params->pga << 8) |
1294 (params->pad << 4) | (params->ipa);
1295 for (j = 0; j < 5; j++)
1296 params->ncorr[j] = 0x79;
1297 } else {
1298 gain = (target.pad[core]) | (target.pga[core] << 4) |
1299 (target.txgm[core] << 8);
1300
1301 indx = (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) ?
1302 1 : 0;
1303 for (i = 0; i < 9; i++)
1304 if (tbl_iqcal_gainparams[indx][i][0] == gain)
1305 break;
1306 i = min(i, 8);
1307
1308 params->txgm = tbl_iqcal_gainparams[indx][i][1];
1309 params->pga = tbl_iqcal_gainparams[indx][i][2];
1310 params->pad = tbl_iqcal_gainparams[indx][i][3];
1311 params->cal_gain = (params->txgm << 7) | (params->pga << 4) |
1312 (params->pad << 2);
1313 for (j = 0; j < 4; j++)
1314 params->ncorr[j] = tbl_iqcal_gainparams[indx][i][4 + j];
1315 }
1316}
1317
1318/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/UpdateTxCalLadder */
1319static void b43_nphy_update_tx_cal_ladder(struct b43_wldev *dev, u16 core)
1320{
1321 struct b43_phy_n *nphy = dev->phy.n;
1322 int i;
1323 u16 scale, entry;
1324
1325 u16 tmp = nphy->txcal_bbmult;
1326 if (core == 0)
1327 tmp >>= 8;
1328 tmp &= 0xff;
1329
1330 for (i = 0; i < 18; i++) {
1331 scale = (ladder_lo[i].percent * tmp) / 100;
1332 entry = ((scale & 0xFF) << 8) | ladder_lo[i].g_env;
1333 /* TODO: Write an N PHY Table with ID 15, length 1,
1334 offset i, width 16, and data entry */
1335
1336 scale = (ladder_iq[i].percent * tmp) / 100;
1337 entry = ((scale & 0xFF) << 8) | ladder_iq[i].g_env;
1338 /* TODO: Write an N PHY Table with ID 15, length 1,
1339 offset i + 32, width 16, and data entry */
1340 }
1341}
1342
1343/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/GetTxGain */
1344static struct nphy_txgains b43_nphy_get_tx_gains(struct b43_wldev *dev)
1345{
1346 struct b43_phy_n *nphy = dev->phy.n;
1347
1348 u16 curr_gain[2];
1349 struct nphy_txgains target;
1350 const u32 *table = NULL;
1351
1352 if (nphy->txpwrctrl == 0) {
1353 int i;
1354
1355 if (nphy->hang_avoid)
1356 b43_nphy_stay_in_carrier_search(dev, true);
1357 /* TODO: Read an N PHY Table with ID 7, length 2,
1358 offset 0x110, width 16, and curr_gain */
1359 if (nphy->hang_avoid)
1360 b43_nphy_stay_in_carrier_search(dev, false);
1361
1362 for (i = 0; i < 2; ++i) {
1363 if (dev->phy.rev >= 3) {
1364 target.ipa[i] = curr_gain[i] & 0x000F;
1365 target.pad[i] = (curr_gain[i] & 0x00F0) >> 4;
1366 target.pga[i] = (curr_gain[i] & 0x0F00) >> 8;
1367 target.txgm[i] = (curr_gain[i] & 0x7000) >> 12;
1368 } else {
1369 target.ipa[i] = curr_gain[i] & 0x0003;
1370 target.pad[i] = (curr_gain[i] & 0x000C) >> 2;
1371 target.pga[i] = (curr_gain[i] & 0x0070) >> 4;
1372 target.txgm[i] = (curr_gain[i] & 0x0380) >> 7;
1373 }
1374 }
1375 } else {
1376 int i;
1377 u16 index[2];
1378 index[0] = (b43_phy_read(dev, B43_NPHY_C1_TXPCTL_STAT) &
1379 B43_NPHY_TXPCTL_STAT_BIDX) >>
1380 B43_NPHY_TXPCTL_STAT_BIDX_SHIFT;
1381 index[1] = (b43_phy_read(dev, B43_NPHY_C2_TXPCTL_STAT) &
1382 B43_NPHY_TXPCTL_STAT_BIDX) >>
1383 B43_NPHY_TXPCTL_STAT_BIDX_SHIFT;
1384
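		/* Unpack the per-core gain components from the selected TX
		 * gain table entry; the bit layout differs between PHY
		 * rev 3+ and earlier revisions. */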
1385 for (i = 0; i < 2; ++i) {
1386 if (dev->phy.rev >= 3) {
1387 enum ieee80211_band band =
1388 b43_current_band(dev->wl);
1389
1390 if ((nphy->ipa2g_on &&
1391 band == IEEE80211_BAND_2GHZ) ||
1392 (nphy->ipa5g_on &&
1393 band == IEEE80211_BAND_5GHZ)) {
1394 table = b43_nphy_get_ipa_gain_table(dev);
1395 } else {
1396 if (band == IEEE80211_BAND_5GHZ) {
1397 if (dev->phy.rev == 3)
1398 table = b43_ntab_tx_gain_rev3_5ghz;
1399 else if (dev->phy.rev == 4)
1400 table = b43_ntab_tx_gain_rev4_5ghz;
1401 else
1402 table = b43_ntab_tx_gain_rev5plus_5ghz;
1403 } else {
1404 table = b43_ntab_tx_gain_rev3plus_2ghz;
1405 }
1406 }
1407
1408 target.ipa[i] = (table[index[i]] >> 16) & 0xF;
1409 target.pad[i] = (table[index[i]] >> 20) & 0xF;
1410 target.pga[i] = (table[index[i]] >> 24) & 0xF;
1411 target.txgm[i] = (table[index[i]] >> 28) & 0xF;
1412 } else {
1413 table = b43_ntab_tx_gain_rev0_1_2;
1414
1415 target.ipa[i] = (table[index[i]] >> 16) & 0x3;
1416 target.pad[i] = (table[index[i]] >> 18) & 0x3;
1417 target.pga[i] = (table[index[i]] >> 20) & 0x7;
1418 target.txgm[i] = (table[index[i]] >> 23) & 0x7;
1419 }
1420 }
1421 }
1422
1423 return target;
1424}
1425
1426/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/RestoreCal */
1427static void b43_nphy_restore_cal(struct b43_wldev *dev)
1428{
1429 struct b43_phy_n *nphy = dev->phy.n;
1430
1431 u16 coef[4];
1432 u16 *loft = NULL;
1433 u16 *table = NULL;
1434
1435 int i;
1436 u16 *txcal_radio_regs = NULL;
1437 struct b43_phy_n_iq_comp *rxcal_coeffs = NULL;
1438
1439 if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
1440 if (nphy->iqcal_chanspec_2G == 0)
1441 return;
1442 table = nphy->cal_cache.txcal_coeffs_2G;
1443 loft = &nphy->cal_cache.txcal_coeffs_2G[5];
1444 } else {
1445 if (nphy->iqcal_chanspec_5G == 0)
1446 return;
1447 table = nphy->cal_cache.txcal_coeffs_5G;
1448 loft = &nphy->cal_cache.txcal_coeffs_5G[5];
1449 }
1450
1451 /* TODO: Write an N PHY table with ID 15, length 4, offset 80,
1452 width 16, and data from table */
1453
1454 for (i = 0; i < 4; i++) {
1455 if (dev->phy.rev >= 3)
1456 table[i] = coef[i];
1457 else
1458 coef[i] = 0;
1459 }
1460
1461 /* TODO: Write an N PHY table with ID 15, length 4, offset 88,
1462 width 16, and data from coef */
1463 /* TODO: Write an N PHY table with ID 15, length 2, offset 85,
1464 width 16 and data from loft */
1465 /* TODO: Write an N PHY table with ID 15, length 2, offset 93,
1466 width 16 and data from loft */
1467
1468 if (dev->phy.rev < 2)
1469 b43_nphy_tx_iq_workaround(dev);
1470
1471 if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
1472 txcal_radio_regs = nphy->cal_cache.txcal_radio_regs_2G;
1473 rxcal_coeffs = &nphy->cal_cache.rxcal_coeffs_2G;
1474 } else {
1475 txcal_radio_regs = nphy->cal_cache.txcal_radio_regs_5G;
1476 rxcal_coeffs = &nphy->cal_cache.rxcal_coeffs_5G;
1477 }
1478
1479 /* TODO use some definitions */
1480 if (dev->phy.rev >= 3) {
1481 b43_radio_write(dev, 0x2021, txcal_radio_regs[0]);
1482 b43_radio_write(dev, 0x2022, txcal_radio_regs[1]);
1483 b43_radio_write(dev, 0x3021, txcal_radio_regs[2]);
1484 b43_radio_write(dev, 0x3022, txcal_radio_regs[3]);
1485 b43_radio_write(dev, 0x2023, txcal_radio_regs[4]);
1486 b43_radio_write(dev, 0x2024, txcal_radio_regs[5]);
1487 b43_radio_write(dev, 0x3023, txcal_radio_regs[6]);
1488 b43_radio_write(dev, 0x3024, txcal_radio_regs[7]);
1489 } else {
1490 b43_radio_write(dev, 0x8B, txcal_radio_regs[0]);
1491 b43_radio_write(dev, 0xBA, txcal_radio_regs[1]);
1492 b43_radio_write(dev, 0x8D, txcal_radio_regs[2]);
1493 b43_radio_write(dev, 0xBC, txcal_radio_regs[3]);
1494 }
1495 b43_nphy_rx_iq_coeffs(dev, true, rxcal_coeffs);
1496}
1497
1498/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/CalTxIqlo */
1499static int b43_nphy_cal_tx_iq_lo(struct b43_wldev *dev,
1500 struct nphy_txgains target,
1501 bool full, bool mphase)
1502{
1503 struct b43_phy_n *nphy = dev->phy.n;
1504 int i;
1505 int error = 0;
1506 int freq;
1507 bool avoid = false;
1508 u8 length;
1509 u16 tmp, core, type, count, max, numb, last, cmd;
1510 const u16 *table;
1511 bool phy6or5x;
1512
1513 u16 buffer[11];
1514 u16 diq_start = 0;
1515 u16 save[2];
1516 u16 gain[2];
1517 struct nphy_iqcal_params params[2];
1518 bool updated[2] = { };
1519
1520 b43_nphy_stay_in_carrier_search(dev, true);
1521
1522 if (dev->phy.rev >= 4) {
1523 avoid = nphy->hang_avoid;
1524 nphy->hang_avoid = 0;
1525 }
1526
1527 /* TODO: Read an N PHY Table with ID 7, length 2, offset 0x110,
1528 width 16, and data pointer save */
1529
1530 for (i = 0; i < 2; i++) {
1531 b43_nphy_iq_cal_gain_params(dev, i, target, &params[i]);
1532 gain[i] = params[i].cal_gain;
1533 }
1534 /* TODO: Write an N PHY Table with ID 7, length 2, offset 0x110,
1535 width 16, and data pointer gain */
1536
1537 b43_nphy_tx_cal_radio_setup(dev);
1538 /* TODO: Call N PHY TX Cal PHY Setup */
1539
1540 phy6or5x = dev->phy.rev >= 6 ||
1541 (dev->phy.rev == 5 && nphy->ipa2g_on &&
1542 b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ);
1543 if (phy6or5x) {
1544 /* TODO */
1545 }
1546
1547 b43_phy_write(dev, B43_NPHY_IQLOCAL_CMDGCTL, 0x8AA9);
1548
1549 if (1 /* FIXME: the band width is 20 MHz */)
1550 freq = 2500;
1551 else
1552 freq = 5000;
1553
1554 if (nphy->mphase_cal_phase_id > 2)
1555 ;/* TODO: Call N PHY Run Samples with (band width * 8),
1556 0xFFFF, 0, 1, 0 as arguments */
1557 else
1558 ;/* TODO: Call N PHY TX Tone with freq, 250, 1, 0 as arguments
1559 and save result as error */
1560
1561 if (error == 0) {
1562 if (nphy->mphase_cal_phase_id > 2) {
1563 table = nphy->mphase_txcal_bestcoeffs;
1564 length = 11;
1565 if (dev->phy.rev < 3)
1566 length -= 2;
1567 } else {
1568 if (!full && nphy->txiqlocal_coeffsvalid) {
1569 table = nphy->txiqlocal_bestc;
1570 length = 11;
1571 if (dev->phy.rev < 3)
1572 length -= 2;
1573 } else {
1574 full = true;
1575 if (dev->phy.rev >= 3) {
1576 table = tbl_tx_iqlo_cal_startcoefs_nphyrev3;
1577 length = B43_NTAB_TX_IQLO_CAL_STARTCOEFS_REV3;
1578 } else {
1579 table = tbl_tx_iqlo_cal_startcoefs;
1580 length = B43_NTAB_TX_IQLO_CAL_STARTCOEFS;
1581 }
1582 }
1583 }
1584
1585 /* TODO: Write an N PHY Table with ID 15, length from above,
1586 offset 64, width 16, and the data pointer from above */
1587
1588 if (full) {
1589 if (dev->phy.rev >= 3)
1590 max = B43_NTAB_TX_IQLO_CAL_CMDS_FULLCAL_REV3;
1591 else
1592 max = B43_NTAB_TX_IQLO_CAL_CMDS_FULLCAL;
1593 } else {
1594 if (dev->phy.rev >= 3)
1595 max = B43_NTAB_TX_IQLO_CAL_CMDS_RECAL_REV3;
1596 else
1597 max = B43_NTAB_TX_IQLO_CAL_CMDS_RECAL;
1598 }
1599
1600 if (mphase) {
1601 count = nphy->mphase_txcal_cmdidx;
1602 numb = min(max,
1603 (u16)(count + nphy->mphase_txcal_numcmds));
1604 } else {
1605 count = 0;
1606 numb = max;
1607 }
1608
1609 for (; count < numb; count++) {
1610 if (full) {
1611 if (dev->phy.rev >= 3)
1612 cmd = tbl_tx_iqlo_cal_cmds_fullcal_nphyrev3[count];
1613 else
1614 cmd = tbl_tx_iqlo_cal_cmds_fullcal[count];
1615 } else {
1616 if (dev->phy.rev >= 3)
1617 cmd = tbl_tx_iqlo_cal_cmds_recal_nphyrev3[count];
1618 else
1619 cmd = tbl_tx_iqlo_cal_cmds_recal[count];
1620 }
1621
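			/* The calibration command encodes the target core in
			 * bits 12-13 and the measurement type in bits 8-11. */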
1622 core = (cmd & 0x3000) >> 12;
1623 type = (cmd & 0x0F00) >> 8;
1624
1625 if (phy6or5x && updated[core] == 0) {
1626 b43_nphy_update_tx_cal_ladder(dev, core);
1627 updated[core] = 1;
1628 }
1629
1630 tmp = (params[core].ncorr[type] << 8) | 0x66;
1631 b43_phy_write(dev, B43_NPHY_IQLOCAL_CMDNNUM, tmp);
1632
1633 if (type == 1 || type == 3 || type == 4) {
1634 /* TODO: Read an N PHY Table with ID 15,
1635 length 1, offset 69 + core,
1636 width 16, and data pointer buffer */
1637 diq_start = buffer[0];
1638 buffer[0] = 0;
1639 /* TODO: Write an N PHY Table with ID 15,
1640 length 1, offset 69 + core, width 16,
1641 and data of 0 */
1642 }
1643
1644 b43_phy_write(dev, B43_NPHY_IQLOCAL_CMD, cmd);
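			/* Wait for the hardware to set the done bits (0xC000),
			 * polling for at most 2000 * 10us = 20 ms. */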
1645 for (i = 0; i < 2000; i++) {
1646 tmp = b43_phy_read(dev, B43_NPHY_IQLOCAL_CMD);
1647 if (tmp & 0xC000)
1648 break;
1649 udelay(10);
1650 }
1651
1652 /* TODO: Read an N PHY Table with ID 15,
1653 length table_length, offset 96, width 16,
1654 and data pointer buffer */
1655 /* TODO: Write an N PHY Table with ID 15,
1656 length table_length, offset 64, width 16,
1657 and data pointer buffer */
1658
1659 if (type == 1 || type == 3 || type == 4)
1660 buffer[0] = diq_start;
1661 }
1662
1663 if (mphase)
1664 nphy->mphase_txcal_cmdidx = (numb >= max) ? 0 : numb;
1665
1666 last = (dev->phy.rev < 3) ? 6 : 7;
1667
1668 if (!mphase || nphy->mphase_cal_phase_id == last) {
1669 /* TODO: Write an N PHY Table with ID 15, length 4,
1670 offset 96, width 16, and data pointer buffer */
1671 /* TODO: Read an N PHY Table with ID 15, length 4,
1672 offset 80, width 16, and data pointer buffer */
1673 if (dev->phy.rev < 3) {
1674 buffer[0] = 0;
1675 buffer[1] = 0;
1676 buffer[2] = 0;
1677 buffer[3] = 0;
1678 }
1679 /* TODO: Write an N PHY Table with ID 15, length 4,
1680 offset 88, width 16, and data pointer buffer */
1681 /* TODO: Read an N PHY Table with ID 15, length 2,
1682 offset 101, width 16, and data pointer buffer*/
1683 /* TODO: Write an N PHY Table with ID 15, length 2,
1684 offset 85, width 16, and data pointer buffer */
1685 /* TODO: Write an N PHY Table with ID 15, length 2,
1686 offset 93, width 16, and data pointer buffer */
1687 length = 11;
1688 if (dev->phy.rev < 3)
1689 length -= 2;
1690 /* TODO: Read an N PHY Table with ID 15, length length,
1691 offset 96, width 16, and data pointer
1692 nphy->txiqlocal_bestc */
1693 nphy->txiqlocal_coeffsvalid = true;
1694 /* TODO: Set nphy->txiqlocal_chanspec to
1695 the current channel */
1696 } else {
1697 length = 11;
1698 if (dev->phy.rev < 3)
1699 length -= 2;
1700			/* TODO: Read an N PHY Table with ID 15, length length,
1701 offset 96, width 16, and data pointer
1702 nphy->mphase_txcal_bestcoeffs */
1703 }
1704
1705 /* TODO: Call N PHY Stop Playback */
1706 b43_phy_write(dev, B43_NPHY_IQLOCAL_CMDGCTL, 0);
1707 }
1708
1709 /* TODO: Call N PHY TX Cal PHY Cleanup */
1710 /* TODO: Write an N PHY Table with ID 7, length 2, offset 0x110,
1711 width 16, and data from save */
1712
1713 if (dev->phy.rev < 2 && (!mphase || nphy->mphase_cal_phase_id == last))
1714 b43_nphy_tx_iq_workaround(dev);
1715
1716 if (dev->phy.rev >= 4)
1717 nphy->hang_avoid = avoid;
1718
1719 b43_nphy_stay_in_carrier_search(dev, false);
1720
1721 return error;
418} 1722}
419 1723
1724/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/CalRxIqRev2 */
1725static int b43_nphy_rev2_cal_rx_iq(struct b43_wldev *dev,
1726 struct nphy_txgains target, u8 type, bool debug)
1727{
1728 struct b43_phy_n *nphy = dev->phy.n;
1729 int i, j, index;
1730 u8 rfctl[2];
1731 u8 afectl_core;
1732 u16 tmp[6];
1733 u16 cur_hpf1, cur_hpf2, cur_lna;
1734 u32 real, imag;
1735 enum ieee80211_band band;
1736
1737 u8 use;
1738 u16 cur_hpf;
1739 u16 lna[3] = { 3, 3, 1 };
1740 u16 hpf1[3] = { 7, 2, 0 };
1741 u16 hpf2[3] = { 2, 0, 0 };
1742 u32 power[3];
1743 u16 gain_save[2];
1744 u16 cal_gain[2];
1745 struct nphy_iqcal_params cal_params[2];
1746 struct nphy_iq_est est;
1747 int ret = 0;
1748 bool playtone = true;
1749 int desired = 13;
1750
1751 b43_nphy_stay_in_carrier_search(dev, 1);
1752
1753 if (dev->phy.rev < 2)
1754 ;/* TODO: Call N PHY Reapply TX Cal Coeffs */
1755 /* TODO: Read an N PHY Table with ID 7, length 2, offset 0x110,
1756 width 16, and data gain_save */
1757 for (i = 0; i < 2; i++) {
1758 b43_nphy_iq_cal_gain_params(dev, i, target, &cal_params[i]);
1759 cal_gain[i] = cal_params[i].cal_gain;
1760 }
1761 /* TODO: Write an N PHY Table with ID 7, length 2, offset 0x110,
1762 width 16, and data from cal_gain */
1763
1764 for (i = 0; i < 2; i++) {
1765 if (i == 0) {
1766 rfctl[0] = B43_NPHY_RFCTL_INTC1;
1767 rfctl[1] = B43_NPHY_RFCTL_INTC2;
1768 afectl_core = B43_NPHY_AFECTL_C1;
1769 } else {
1770 rfctl[0] = B43_NPHY_RFCTL_INTC2;
1771 rfctl[1] = B43_NPHY_RFCTL_INTC1;
1772 afectl_core = B43_NPHY_AFECTL_C2;
1773 }
1774
1775 tmp[1] = b43_phy_read(dev, B43_NPHY_RFSEQCA);
1776 tmp[2] = b43_phy_read(dev, afectl_core);
1777 tmp[3] = b43_phy_read(dev, B43_NPHY_AFECTL_OVER);
1778 tmp[4] = b43_phy_read(dev, rfctl[0]);
1779 tmp[5] = b43_phy_read(dev, rfctl[1]);
1780
1781 b43_phy_maskset(dev, B43_NPHY_RFSEQCA,
1782 (u16)~B43_NPHY_RFSEQCA_RXDIS,
1783 ((1 - i) << B43_NPHY_RFSEQCA_RXDIS_SHIFT));
1784 b43_phy_maskset(dev, B43_NPHY_RFSEQCA, ~B43_NPHY_RFSEQCA_TXEN,
1785 (1 - i));
1786 b43_phy_set(dev, afectl_core, 0x0006);
1787 b43_phy_set(dev, B43_NPHY_AFECTL_OVER, 0x0006);
1788
1789 band = b43_current_band(dev->wl);
1790
1791 if (nphy->rxcalparams & 0xFF000000) {
1792 if (band == IEEE80211_BAND_5GHZ)
1793 b43_phy_write(dev, rfctl[0], 0x140);
1794 else
1795 b43_phy_write(dev, rfctl[0], 0x110);
1796 } else {
1797 if (band == IEEE80211_BAND_5GHZ)
1798 b43_phy_write(dev, rfctl[0], 0x180);
1799 else
1800 b43_phy_write(dev, rfctl[0], 0x120);
1801 }
1802
1803 if (band == IEEE80211_BAND_5GHZ)
1804 b43_phy_write(dev, rfctl[1], 0x148);
1805 else
1806 b43_phy_write(dev, rfctl[1], 0x114);
1807
1808 if (nphy->rxcalparams & 0x10000) {
1809 b43_radio_maskset(dev, B2055_C1_GENSPARE2, 0xFC,
1810 (i + 1));
1811 b43_radio_maskset(dev, B2055_C2_GENSPARE2, 0xFC,
1812 (2 - i));
1813 }
1814
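		/* Three passes sweep the LNA/HPF gain candidates; the fourth
		 * pass picks the best setting from the measured powers and
		 * runs the RX IQ compensation estimate. */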
1815		for (j = 0; j < 4; j++) {
1816 if (j < 3) {
1817 cur_lna = lna[j];
1818 cur_hpf1 = hpf1[j];
1819 cur_hpf2 = hpf2[j];
1820 } else {
1821 if (power[1] > 10000) {
1822 use = 1;
1823 cur_hpf = cur_hpf1;
1824 index = 2;
1825 } else {
1826 if (power[0] > 10000) {
1827 use = 1;
1828 cur_hpf = cur_hpf1;
1829 index = 1;
1830 } else {
1831 index = 0;
1832 use = 2;
1833 cur_hpf = cur_hpf2;
1834 }
1835 }
1836 cur_lna = lna[index];
1837 cur_hpf1 = hpf1[index];
1838 cur_hpf2 = hpf2[index];
1839 cur_hpf += desired - hweight32(power[index]);
1840 cur_hpf = clamp_val(cur_hpf, 0, 10);
1841 if (use == 1)
1842 cur_hpf1 = cur_hpf;
1843 else
1844 cur_hpf2 = cur_hpf;
1845 }
1846
1847 tmp[0] = ((cur_hpf2 << 8) | (cur_hpf1 << 4) |
1848 (cur_lna << 2));
1849			/* TODO: Call N PHY RF Ctrl Override with 0x400, tmp[0],
1850 3, 0 as arguments */
1851 /* TODO: Call N PHY Force RF Seq with 2 as argument */
1852			/* TODO: Call N PHY Stop Playback */
1853
1854 if (playtone) {
1855 /* TODO: Call N PHY TX Tone with 4000,
1856 (nphy_rxcalparams & 0xffff), 0, 0
1857 as arguments and save result as ret */
1858 playtone = false;
1859 } else {
1860 /* TODO: Call N PHY Run Samples with 160,
1861 0xFFFF, 0, 0, 0 as arguments */
1862 }
1863
1864 if (ret == 0) {
1865 if (j < 3) {
1866 b43_nphy_rx_iq_est(dev, &est, 1024, 32,
1867 false);
1868 if (i == 0) {
1869 real = est.i0_pwr;
1870 imag = est.q0_pwr;
1871 } else {
1872 real = est.i1_pwr;
1873 imag = est.q1_pwr;
1874 }
1875 power[i] = ((real + imag) / 1024) + 1;
1876 } else {
1877 b43_nphy_calc_rx_iq_comp(dev, 1 << i);
1878 }
1879 /* TODO: Call N PHY Stop Playback */
1880 }
1881
1882 if (ret != 0)
1883 break;
1884 }
1885
1886 b43_radio_mask(dev, B2055_C1_GENSPARE2, 0xFC);
1887 b43_radio_mask(dev, B2055_C2_GENSPARE2, 0xFC);
1888 b43_phy_write(dev, rfctl[1], tmp[5]);
1889 b43_phy_write(dev, rfctl[0], tmp[4]);
1890 b43_phy_write(dev, B43_NPHY_AFECTL_OVER, tmp[3]);
1891 b43_phy_write(dev, afectl_core, tmp[2]);
1892 b43_phy_write(dev, B43_NPHY_RFSEQCA, tmp[1]);
1893
1894 if (ret != 0)
1895 break;
1896 }
1897
1898	/* TODO: Call N PHY RF Ctrl Override with 0x400, 0, 3, 1 as arguments */
1899 /* TODO: Call N PHY Force RF Seq with 2 as argument */
1900 /* TODO: Write an N PHY Table with ID 7, length 2, offset 0x110,
1901 width 16, and data from gain_save */
1902
1903 b43_nphy_stay_in_carrier_search(dev, 0);
1904
1905 return ret;
1906}
1907
1908static int b43_nphy_rev3_cal_rx_iq(struct b43_wldev *dev,
1909 struct nphy_txgains target, u8 type, bool debug)
1910{
1911 return -1;
1912}
1913
1914/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/CalRxIq */
1915static int b43_nphy_cal_rx_iq(struct b43_wldev *dev,
1916 struct nphy_txgains target, u8 type, bool debug)
1917{
1918 if (dev->phy.rev >= 3)
1919 return b43_nphy_rev3_cal_rx_iq(dev, target, type, debug);
1920 else
1921 return b43_nphy_rev2_cal_rx_iq(dev, target, type, debug);
1922}
1923
1924/*
1925 * Init N-PHY
1926 * http://bcm-v4.sipsolutions.net/802.11/PHY/Init/N
1927 */
420int b43_phy_initn(struct b43_wldev *dev) 1928int b43_phy_initn(struct b43_wldev *dev)
421{ 1929{
1930 struct ssb_bus *bus = dev->dev->bus;
422 struct b43_phy *phy = &dev->phy; 1931 struct b43_phy *phy = &dev->phy;
1932 struct b43_phy_n *nphy = phy->n;
1933 u8 tx_pwr_state;
1934 struct nphy_txgains target;
423 u16 tmp; 1935 u16 tmp;
1936 enum ieee80211_band tmp2;
1937 bool do_rssi_cal;
1938
1939 u16 clip[2];
1940 bool do_cal = false;
424 1941
425 //TODO: Spectral management 1942 if ((dev->phy.rev >= 3) &&
1943 (bus->sprom.boardflags_lo & B43_BFL_EXTLNA) &&
1944 (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ)) {
1945 chipco_set32(&dev->dev->bus->chipco, SSB_CHIPCO_CHIPCTL, 0x40);
1946 }
1947 nphy->deaf_count = 0;
426 b43_nphy_tables_init(dev); 1948 b43_nphy_tables_init(dev);
1949 nphy->crsminpwr_adjusted = false;
1950 nphy->noisevars_adjusted = false;
427 1951
428 /* Clear all overrides */ 1952 /* Clear all overrides */
429 b43_phy_write(dev, B43_NPHY_RFCTL_OVER, 0); 1953 if (dev->phy.rev >= 3) {
1954 b43_phy_write(dev, B43_NPHY_TXF_40CO_B1S1, 0);
1955 b43_phy_write(dev, B43_NPHY_RFCTL_OVER, 0);
1956 b43_phy_write(dev, B43_NPHY_TXF_40CO_B1S0, 0);
1957 b43_phy_write(dev, B43_NPHY_TXF_40CO_B32S1, 0);
1958 } else {
1959 b43_phy_write(dev, B43_NPHY_RFCTL_OVER, 0);
1960 }
430 b43_phy_write(dev, B43_NPHY_RFCTL_INTC1, 0); 1961 b43_phy_write(dev, B43_NPHY_RFCTL_INTC1, 0);
431 b43_phy_write(dev, B43_NPHY_RFCTL_INTC2, 0); 1962 b43_phy_write(dev, B43_NPHY_RFCTL_INTC2, 0);
432 b43_phy_write(dev, B43_NPHY_RFCTL_INTC3, 0); 1963 if (dev->phy.rev < 6) {
433 b43_phy_write(dev, B43_NPHY_RFCTL_INTC4, 0); 1964 b43_phy_write(dev, B43_NPHY_RFCTL_INTC3, 0);
1965 b43_phy_write(dev, B43_NPHY_RFCTL_INTC4, 0);
1966 }
434 b43_phy_mask(dev, B43_NPHY_RFSEQMODE, 1967 b43_phy_mask(dev, B43_NPHY_RFSEQMODE,
435 ~(B43_NPHY_RFSEQMODE_CAOVER | 1968 ~(B43_NPHY_RFSEQMODE_CAOVER |
436 B43_NPHY_RFSEQMODE_TROVER)); 1969 B43_NPHY_RFSEQMODE_TROVER));
1970 if (dev->phy.rev >= 3)
1971 b43_phy_write(dev, B43_NPHY_AFECTL_OVER1, 0);
437 b43_phy_write(dev, B43_NPHY_AFECTL_OVER, 0); 1972 b43_phy_write(dev, B43_NPHY_AFECTL_OVER, 0);
438 1973
439 tmp = (phy->rev < 2) ? 64 : 59; 1974 if (dev->phy.rev <= 2) {
440 b43_phy_maskset(dev, B43_NPHY_BPHY_CTL3, 1975 tmp = (dev->phy.rev == 2) ? 0x3B : 0x40;
441 ~B43_NPHY_BPHY_CTL3_SCALE, 1976 b43_phy_maskset(dev, B43_NPHY_BPHY_CTL3,
442 tmp << B43_NPHY_BPHY_CTL3_SCALE_SHIFT); 1977 ~B43_NPHY_BPHY_CTL3_SCALE,
443 1978 tmp << B43_NPHY_BPHY_CTL3_SCALE_SHIFT);
1979 }
444 b43_phy_write(dev, B43_NPHY_AFESEQ_TX2RX_PUD_20M, 0x20); 1980 b43_phy_write(dev, B43_NPHY_AFESEQ_TX2RX_PUD_20M, 0x20);
445 b43_phy_write(dev, B43_NPHY_AFESEQ_TX2RX_PUD_40M, 0x20); 1981 b43_phy_write(dev, B43_NPHY_AFESEQ_TX2RX_PUD_40M, 0x20);
446 1982
447 b43_phy_write(dev, B43_NPHY_TXREALFD, 184); 1983 if (bus->sprom.boardflags2_lo & 0x100 ||
448 b43_phy_write(dev, B43_NPHY_MIMO_CRSTXEXT, 200); 1984 (bus->boardinfo.vendor == PCI_VENDOR_ID_APPLE &&
449 b43_phy_write(dev, B43_NPHY_PLOAD_CSENSE_EXTLEN, 80); 1985 bus->boardinfo.type == 0x8B))
450 b43_phy_write(dev, B43_NPHY_C2_BCLIPBKOFF, 511); 1986 b43_phy_write(dev, B43_NPHY_TXREALFD, 0xA0);
1987 else
1988 b43_phy_write(dev, B43_NPHY_TXREALFD, 0xB8);
1989 b43_phy_write(dev, B43_NPHY_MIMO_CRSTXEXT, 0xC8);
1990 b43_phy_write(dev, B43_NPHY_PLOAD_CSENSE_EXTLEN, 0x50);
1991 b43_phy_write(dev, B43_NPHY_TXRIFS_FRDEL, 0x30);
451 1992
452 //TODO MIMO-Config 1993 /* TODO MIMO-Config */
453 //TODO Update TX/RX chain 1994 /* TODO Update TX/RX chain */
454 1995
455 if (phy->rev < 2) { 1996 if (phy->rev < 2) {
456 b43_phy_write(dev, B43_NPHY_DUP40_GFBL, 0xAA8); 1997 b43_phy_write(dev, B43_NPHY_DUP40_GFBL, 0xAA8);
457 b43_phy_write(dev, B43_NPHY_DUP40_BL, 0x9A4); 1998 b43_phy_write(dev, B43_NPHY_DUP40_BL, 0x9A4);
458 } 1999 }
2000
2001 tmp2 = b43_current_band(dev->wl);
2002 if ((nphy->ipa2g_on && tmp2 == IEEE80211_BAND_2GHZ) ||
2003 (nphy->ipa5g_on && tmp2 == IEEE80211_BAND_5GHZ)) {
2004 b43_phy_set(dev, B43_NPHY_PAPD_EN0, 0x1);
2005 b43_phy_maskset(dev, B43_NPHY_EPS_TABLE_ADJ0, 0x007F,
2006 nphy->papd_epsilon_offset[0] << 7);
2007 b43_phy_set(dev, B43_NPHY_PAPD_EN1, 0x1);
2008 b43_phy_maskset(dev, B43_NPHY_EPS_TABLE_ADJ1, 0x007F,
2009 nphy->papd_epsilon_offset[1] << 7);
2010 /* TODO N PHY IPA Set TX Dig Filters */
2011 } else if (phy->rev >= 5) {
2012 /* TODO N PHY Ext PA Set TX Dig Filters */
2013 }
2014
459 b43_nphy_workarounds(dev); 2015 b43_nphy_workarounds(dev);
460 b43_nphy_reset_cca(dev);
461 2016
462 ssb_write32(dev->dev, SSB_TMSLOW, 2017 /* Reset CCA, in init code it differs a little from standard way */
463 ssb_read32(dev->dev, SSB_TMSLOW) | B43_TMSLOW_MACPHYCLKEN); 2018 b43_nphy_bmac_clock_fgc(dev, 1);
2019 tmp = b43_phy_read(dev, B43_NPHY_BBCFG);
2020 b43_phy_write(dev, B43_NPHY_BBCFG, tmp | B43_NPHY_BBCFG_RSTCCA);
2021 b43_phy_write(dev, B43_NPHY_BBCFG, tmp & ~B43_NPHY_BBCFG_RSTCCA);
2022 b43_nphy_bmac_clock_fgc(dev, 0);
2023
2024 /* TODO N PHY MAC PHY Clock Set with argument 1 */
2025
2026 b43_nphy_pa_override(dev, false);
464 b43_nphy_force_rf_sequence(dev, B43_RFSEQ_RX2TX); 2027 b43_nphy_force_rf_sequence(dev, B43_RFSEQ_RX2TX);
465 b43_nphy_force_rf_sequence(dev, B43_RFSEQ_RESET2RX); 2028 b43_nphy_force_rf_sequence(dev, B43_RFSEQ_RESET2RX);
2029 b43_nphy_pa_override(dev, true);
2030
2031 b43_nphy_classifier(dev, 0, 0);
2032 b43_nphy_read_clip_detection(dev, clip);
2033 tx_pwr_state = nphy->txpwrctrl;
2034 /* TODO N PHY TX power control with argument 0
2035 (turning off power control) */
2036 /* TODO Fix the TX Power Settings */
2037 /* TODO N PHY TX Power Control Idle TSSI */
2038 /* TODO N PHY TX Power Control Setup */
2039
2040 if (phy->rev >= 3) {
2041 /* TODO */
2042 } else {
2043 /* TODO Write an N PHY table with ID 26, length 128, offset 192, width 32, and the data from Rev 2 TX Power Control Table */
2044 /* TODO Write an N PHY table with ID 27, length 128, offset 192, width 32, and the data from Rev 2 TX Power Control Table */
2045 }
2046
2047 if (nphy->phyrxchain != 3)
2048 ;/* TODO N PHY RX Core Set State with phyrxchain as argument */
2049 if (nphy->mphase_cal_phase_id > 0)
2050 ;/* TODO PHY Periodic Calibration Multi-Phase Restart */
2051
2052 do_rssi_cal = false;
2053 if (phy->rev >= 3) {
2054 if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ)
2055 do_rssi_cal = (nphy->rssical_chanspec_2G == 0);
2056 else
2057 do_rssi_cal = (nphy->rssical_chanspec_5G == 0);
2058
2059 if (do_rssi_cal)
2060 b43_nphy_rssi_cal(dev);
2061 else
2062 b43_nphy_restore_rssi_cal(dev);
2063 } else {
2064 b43_nphy_rssi_cal(dev);
2065 }
2066
2067	if (!(nphy->measure_hold & 0x6)) {
2068 if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ)
2069 do_cal = (nphy->iqcal_chanspec_2G == 0);
2070 else
2071 do_cal = (nphy->iqcal_chanspec_5G == 0);
2072
2073 if (nphy->mute)
2074 do_cal = false;
2075
2076 if (do_cal) {
2077 target = b43_nphy_get_tx_gains(dev);
2078
2079 if (nphy->antsel_type == 2)
2080 ;/*TODO NPHY Superswitch Init with argument 1*/
2081 if (nphy->perical != 2) {
2082 b43_nphy_rssi_cal(dev);
2083 if (phy->rev >= 3) {
2084 nphy->cal_orig_pwr_idx[0] =
2085 nphy->txpwrindex[0].index_internal;
2086 nphy->cal_orig_pwr_idx[1] =
2087 nphy->txpwrindex[1].index_internal;
2088 /* TODO N PHY Pre Calibrate TX Gain */
2089 target = b43_nphy_get_tx_gains(dev);
2090 }
2091 }
2092 }
2093 }
2094
2095 if (!b43_nphy_cal_tx_iq_lo(dev, target, true, false)) {
2096 if (b43_nphy_cal_rx_iq(dev, target, 2, 0) == 0)
2097 ;/* Call N PHY Save Cal */
2098 else if (nphy->mphase_cal_phase_id == 0)
2099 ;/* N PHY Periodic Calibration with argument 3 */
2100 } else {
2101 b43_nphy_restore_cal(dev);
2102 }
466 2103
467 b43_phy_read(dev, B43_NPHY_CLASSCTL); /* dummy read */ 2104 b43_nphy_tx_pwr_ctrl_coef_setup(dev);
468 //TODO read core1/2 clip1 thres regs 2105 /* TODO N PHY TX Power Control Enable with argument tx_pwr_state */
469 2106 b43_phy_write(dev, B43_NPHY_TXMACIF_HOLDOFF, 0x0015);
470 if (1 /* FIXME Band is 2.4GHz */) 2107 b43_phy_write(dev, B43_NPHY_TXMACDELAY, 0x0320);
471 b43_nphy_bphy_init(dev); 2108 if (phy->rev >= 3 && phy->rev <= 6)
472 //TODO disable TX power control 2109 b43_phy_write(dev, B43_NPHY_PLOAD_CSENSE_EXTLEN, 0x0014);
473 //TODO Fix the TX power settings 2110 b43_nphy_tx_lp_fbw(dev);
474 //TODO Init periodic calibration with reason 3 2111 /* TODO N PHY Spur Workaround */
475 b43_nphy_rssi_cal(dev, 2);
476 b43_nphy_rssi_cal(dev, 0);
477 b43_nphy_rssi_cal(dev, 1);
478 //TODO get TX gain
479 //TODO init superswitch
480 //TODO calibrate LO
481 //TODO idle TSSI TX pctl
482 //TODO TX power control power setup
483 //TODO table writes
484 //TODO TX power control coefficients
485 //TODO enable TX power control
486 //TODO control antenna selection
487 //TODO init radar detection
488 //TODO reset channel if changed
489 2112
490 b43err(dev->wl, "IEEE 802.11n devices are not supported, yet.\n"); 2113 b43err(dev->wl, "IEEE 802.11n devices are not supported, yet.\n");
491 return 0; 2114 return 0;
diff --git a/drivers/net/wireless/b43/phy_n.h b/drivers/net/wireless/b43/phy_n.h
index 1749aef4147..4572866756f 100644
--- a/drivers/net/wireless/b43/phy_n.h
+++ b/drivers/net/wireless/b43/phy_n.h
@@ -231,6 +231,7 @@
231#define B43_NPHY_C2_TXIQ_COMP_OFF B43_PHY_N(0x088) /* Core 2 TX I/Q comp offset */ 231#define B43_NPHY_C2_TXIQ_COMP_OFF B43_PHY_N(0x088) /* Core 2 TX I/Q comp offset */
232#define B43_NPHY_C1_TXCTL B43_PHY_N(0x08B) /* Core 1 TX control */ 232#define B43_NPHY_C1_TXCTL B43_PHY_N(0x08B) /* Core 1 TX control */
233#define B43_NPHY_C2_TXCTL B43_PHY_N(0x08C) /* Core 2 TX control */ 233#define B43_NPHY_C2_TXCTL B43_PHY_N(0x08C) /* Core 2 TX control */
234#define B43_NPHY_AFECTL_OVER1 B43_PHY_N(0x08F) /* AFE control override 1 */
234#define B43_NPHY_SCRAM_SIGCTL B43_PHY_N(0x090) /* Scram signal control */ 235#define B43_NPHY_SCRAM_SIGCTL B43_PHY_N(0x090) /* Scram signal control */
235#define B43_NPHY_SCRAM_SIGCTL_INITST 0x007F /* Initial state value */ 236#define B43_NPHY_SCRAM_SIGCTL_INITST 0x007F /* Initial state value */
236#define B43_NPHY_SCRAM_SIGCTL_INITST_SHIFT 0 237#define B43_NPHY_SCRAM_SIGCTL_INITST_SHIFT 0
@@ -705,6 +706,10 @@
705#define B43_NPHY_TXPCTL_INIT B43_PHY_N(0x222) /* TX power controll init */ 706#define B43_NPHY_TXPCTL_INIT B43_PHY_N(0x222) /* TX power controll init */
706#define B43_NPHY_TXPCTL_INIT_PIDXI1 0x00FF /* Power index init 1 */ 707#define B43_NPHY_TXPCTL_INIT_PIDXI1 0x00FF /* Power index init 1 */
707#define B43_NPHY_TXPCTL_INIT_PIDXI1_SHIFT 0 708#define B43_NPHY_TXPCTL_INIT_PIDXI1_SHIFT 0
709#define B43_NPHY_PAPD_EN0 B43_PHY_N(0x297) /* PAPD Enable0 TBD */
710#define B43_NPHY_EPS_TABLE_ADJ0 B43_PHY_N(0x298) /* EPS Table Adj0 TBD */
711#define B43_NPHY_PAPD_EN1 B43_PHY_N(0x29B) /* PAPD Enable1 TBD */
712#define B43_NPHY_EPS_TABLE_ADJ1 B43_PHY_N(0x29C) /* EPS Table Adj1 TBD */
708 713
709 714
710 715
@@ -919,8 +924,88 @@
919 924
920struct b43_wldev; 925struct b43_wldev;
921 926
927struct b43_phy_n_iq_comp {
928 s16 a0;
929 s16 b0;
930 s16 a1;
931 s16 b1;
932};
933
934struct b43_phy_n_rssical_cache {
935 u16 rssical_radio_regs_2G[2];
936 u16 rssical_phy_regs_2G[12];
937
938 u16 rssical_radio_regs_5G[2];
939 u16 rssical_phy_regs_5G[12];
940};
941
942struct b43_phy_n_cal_cache {
943 u16 txcal_radio_regs_2G[8];
944 u16 txcal_coeffs_2G[8];
945 struct b43_phy_n_iq_comp rxcal_coeffs_2G;
946
947 u16 txcal_radio_regs_5G[8];
948 u16 txcal_coeffs_5G[8];
949 struct b43_phy_n_iq_comp rxcal_coeffs_5G;
950};
951
952struct b43_phy_n_txpwrindex {
953 s8 index;
954 s8 index_internal;
955 s8 index_internal_save;
956 u16 AfectrlOverride;
957 u16 AfeCtrlDacGain;
958 u16 rad_gain;
959 u8 bbmult;
960 u16 iqcomp_a;
961 u16 iqcomp_b;
962 u16 locomp;
963};
964
922struct b43_phy_n { 965struct b43_phy_n {
923 //TODO lots of missing stuff 966 u8 antsel_type;
967 u8 cal_orig_pwr_idx[2];
968 u8 measure_hold;
969 u8 phyrxchain;
970 u8 perical;
971 u32 deaf_count;
972 u32 rxcalparams;
973 bool hang_avoid;
974 bool mute;
975 u16 papd_epsilon_offset[2];
976
977 u8 mphase_cal_phase_id;
978 u16 mphase_txcal_cmdidx;
979 u16 mphase_txcal_numcmds;
980 u16 mphase_txcal_bestcoeffs[11];
981
982 u8 txpwrctrl;
983 u16 txcal_bbmult;
984 u16 txiqlocal_bestc[11];
985 bool txiqlocal_coeffsvalid;
986 struct b43_phy_n_txpwrindex txpwrindex[2];
987
988 u16 tx_rx_cal_phy_saveregs[11];
989 u16 tx_rx_cal_radio_saveregs[22];
990
991 u16 rfctrl_intc1_save;
992 u16 rfctrl_intc2_save;
993
994 u16 classifier_state;
995 u16 clip_state[2];
996
997 bool ipa2g_on;
998 u8 iqcal_chanspec_2G;
999 u8 rssical_chanspec_2G;
1000
1001 bool ipa5g_on;
1002 u8 iqcal_chanspec_5G;
1003 u8 rssical_chanspec_5G;
1004
1005 struct b43_phy_n_rssical_cache rssical_cache;
1006 struct b43_phy_n_cal_cache cal_cache;
1007 bool crsminpwr_adjusted;
1008 bool noisevars_adjusted;
924}; 1009};
925 1010
926 1011
diff --git a/drivers/net/wireless/b43/pio.h b/drivers/net/wireless/b43/pio.h
index 7dd649c9dda..7b3c42f93a1 100644
--- a/drivers/net/wireless/b43/pio.h
+++ b/drivers/net/wireless/b43/pio.h
@@ -55,8 +55,6 @@
55#define B43_PIO_MAX_NR_TXPACKETS 32 55#define B43_PIO_MAX_NR_TXPACKETS 32
56 56
57 57
58#ifdef CONFIG_B43_PIO
59
60struct b43_pio_txpacket { 58struct b43_pio_txpacket {
61 /* Pointer to the TX queue we belong to. */ 59 /* Pointer to the TX queue we belong to. */
62 struct b43_pio_txqueue *queue; 60 struct b43_pio_txqueue *queue;
@@ -169,42 +167,4 @@ void b43_pio_rx(struct b43_pio_rxqueue *q);
169void b43_pio_tx_suspend(struct b43_wldev *dev); 167void b43_pio_tx_suspend(struct b43_wldev *dev);
170void b43_pio_tx_resume(struct b43_wldev *dev); 168void b43_pio_tx_resume(struct b43_wldev *dev);
171 169
172
173#else /* CONFIG_B43_PIO */
174
175
176static inline int b43_pio_init(struct b43_wldev *dev)
177{
178 return 0;
179}
180static inline void b43_pio_free(struct b43_wldev *dev)
181{
182}
183static inline void b43_pio_stop(struct b43_wldev *dev)
184{
185}
186static inline int b43_pio_tx(struct b43_wldev *dev,
187 struct sk_buff *skb)
188{
189 return 0;
190}
191static inline void b43_pio_handle_txstatus(struct b43_wldev *dev,
192 const struct b43_txstatus *status)
193{
194}
195static inline void b43_pio_get_tx_stats(struct b43_wldev *dev,
196 struct ieee80211_tx_queue_stats *stats)
197{
198}
199static inline void b43_pio_rx(struct b43_pio_rxqueue *q)
200{
201}
202static inline void b43_pio_tx_suspend(struct b43_wldev *dev)
203{
204}
205static inline void b43_pio_tx_resume(struct b43_wldev *dev)
206{
207}
208
209#endif /* CONFIG_B43_PIO */
210#endif /* B43_PIO_H_ */ 170#endif /* B43_PIO_H_ */
diff --git a/drivers/net/wireless/b43/tables_nphy.c b/drivers/net/wireless/b43/tables_nphy.c
index 4e233631554..7dff853ab96 100644
--- a/drivers/net/wireless/b43/tables_nphy.c
+++ b/drivers/net/wireless/b43/tables_nphy.c
@@ -1336,7 +1336,7 @@ b43_nphy_get_chantabent(struct b43_wldev *dev, u8 channel)
1336} 1336}
1337 1337
1338 1338
1339const u8 b43_ntab_adjustpower0[] = { 1339static const u8 b43_ntab_adjustpower0[] = {
1340 0x00, 0x00, 0x00, 0x00, 0x01, 0x01, 0x01, 0x01, 1340 0x00, 0x00, 0x00, 0x00, 0x01, 0x01, 0x01, 0x01,
1341 0x02, 0x02, 0x02, 0x02, 0x03, 0x03, 0x03, 0x03, 1341 0x02, 0x02, 0x02, 0x02, 0x03, 0x03, 0x03, 0x03,
1342 0x04, 0x04, 0x04, 0x04, 0x05, 0x05, 0x05, 0x05, 1342 0x04, 0x04, 0x04, 0x04, 0x05, 0x05, 0x05, 0x05,
@@ -1355,7 +1355,7 @@ const u8 b43_ntab_adjustpower0[] = {
1355 0x1E, 0x1E, 0x1E, 0x1E, 0x1F, 0x1F, 0x1F, 0x1F, 1355 0x1E, 0x1E, 0x1E, 0x1E, 0x1F, 0x1F, 0x1F, 0x1F,
1356}; 1356};
1357 1357
1358const u8 b43_ntab_adjustpower1[] = { 1358static const u8 b43_ntab_adjustpower1[] = {
1359 0x00, 0x00, 0x00, 0x00, 0x01, 0x01, 0x01, 0x01, 1359 0x00, 0x00, 0x00, 0x00, 0x01, 0x01, 0x01, 0x01,
1360 0x02, 0x02, 0x02, 0x02, 0x03, 0x03, 0x03, 0x03, 1360 0x02, 0x02, 0x02, 0x02, 0x03, 0x03, 0x03, 0x03,
1361 0x04, 0x04, 0x04, 0x04, 0x05, 0x05, 0x05, 0x05, 1361 0x04, 0x04, 0x04, 0x04, 0x05, 0x05, 0x05, 0x05,
@@ -1374,11 +1374,11 @@ const u8 b43_ntab_adjustpower1[] = {
1374 0x1E, 0x1E, 0x1E, 0x1E, 0x1F, 0x1F, 0x1F, 0x1F, 1374 0x1E, 0x1E, 0x1E, 0x1E, 0x1F, 0x1F, 0x1F, 0x1F,
1375}; 1375};
1376 1376
1377const u16 b43_ntab_bdi[] = { 1377static const u16 b43_ntab_bdi[] = {
1378 0x0070, 0x0126, 0x012C, 0x0246, 0x048D, 0x04D2, 1378 0x0070, 0x0126, 0x012C, 0x0246, 0x048D, 0x04D2,
1379}; 1379};
1380 1380
1381const u32 b43_ntab_channelest[] = { 1381static const u32 b43_ntab_channelest[] = {
1382 0x44444444, 0x44444444, 0x44444444, 0x44444444, 1382 0x44444444, 0x44444444, 0x44444444, 0x44444444,
1383 0x44444444, 0x44444444, 0x44444444, 0x44444444, 1383 0x44444444, 0x44444444, 0x44444444, 0x44444444,
1384 0x10101010, 0x10101010, 0x10101010, 0x10101010, 1384 0x10101010, 0x10101010, 0x10101010, 0x10101010,
@@ -1405,7 +1405,7 @@ const u32 b43_ntab_channelest[] = {
1405 0x10101010, 0x10101010, 0x10101010, 0x10101010, 1405 0x10101010, 0x10101010, 0x10101010, 0x10101010,
1406}; 1406};
1407 1407
1408const u8 b43_ntab_estimatepowerlt0[] = { 1408static const u8 b43_ntab_estimatepowerlt0[] = {
1409 0x50, 0x4F, 0x4E, 0x4D, 0x4C, 0x4B, 0x4A, 0x49, 1409 0x50, 0x4F, 0x4E, 0x4D, 0x4C, 0x4B, 0x4A, 0x49,
1410 0x48, 0x47, 0x46, 0x45, 0x44, 0x43, 0x42, 0x41, 1410 0x48, 0x47, 0x46, 0x45, 0x44, 0x43, 0x42, 0x41,
1411 0x40, 0x3F, 0x3E, 0x3D, 0x3C, 0x3B, 0x3A, 0x39, 1411 0x40, 0x3F, 0x3E, 0x3D, 0x3C, 0x3B, 0x3A, 0x39,
@@ -1416,7 +1416,7 @@ const u8 b43_ntab_estimatepowerlt0[] = {
1416 0x18, 0x17, 0x16, 0x15, 0x14, 0x13, 0x12, 0x11, 1416 0x18, 0x17, 0x16, 0x15, 0x14, 0x13, 0x12, 0x11,
1417}; 1417};
1418 1418
1419const u8 b43_ntab_estimatepowerlt1[] = { 1419static const u8 b43_ntab_estimatepowerlt1[] = {
1420 0x50, 0x4F, 0x4E, 0x4D, 0x4C, 0x4B, 0x4A, 0x49, 1420 0x50, 0x4F, 0x4E, 0x4D, 0x4C, 0x4B, 0x4A, 0x49,
1421 0x48, 0x47, 0x46, 0x45, 0x44, 0x43, 0x42, 0x41, 1421 0x48, 0x47, 0x46, 0x45, 0x44, 0x43, 0x42, 0x41,
1422 0x40, 0x3F, 0x3E, 0x3D, 0x3C, 0x3B, 0x3A, 0x39, 1422 0x40, 0x3F, 0x3E, 0x3D, 0x3C, 0x3B, 0x3A, 0x39,
@@ -1427,14 +1427,14 @@ const u8 b43_ntab_estimatepowerlt1[] = {
1427 0x18, 0x17, 0x16, 0x15, 0x14, 0x13, 0x12, 0x11, 1427 0x18, 0x17, 0x16, 0x15, 0x14, 0x13, 0x12, 0x11,
1428}; 1428};
1429 1429
1430const u8 b43_ntab_framelookup[] = { 1430static const u8 b43_ntab_framelookup[] = {
1431 0x02, 0x04, 0x14, 0x14, 0x03, 0x05, 0x16, 0x16, 1431 0x02, 0x04, 0x14, 0x14, 0x03, 0x05, 0x16, 0x16,
1432 0x0A, 0x0C, 0x1C, 0x1C, 0x0B, 0x0D, 0x1E, 0x1E, 1432 0x0A, 0x0C, 0x1C, 0x1C, 0x0B, 0x0D, 0x1E, 0x1E,
1433 0x06, 0x08, 0x18, 0x18, 0x07, 0x09, 0x1A, 0x1A, 1433 0x06, 0x08, 0x18, 0x18, 0x07, 0x09, 0x1A, 0x1A,
1434 0x0E, 0x10, 0x20, 0x28, 0x0F, 0x11, 0x22, 0x2A, 1434 0x0E, 0x10, 0x20, 0x28, 0x0F, 0x11, 0x22, 0x2A,
1435}; 1435};
1436 1436
1437const u32 b43_ntab_framestruct[] = { 1437static const u32 b43_ntab_framestruct[] = {
1438 0x08004A04, 0x00100000, 0x01000A05, 0x00100020, 1438 0x08004A04, 0x00100000, 0x01000A05, 0x00100020,
1439 0x09804506, 0x00100030, 0x09804507, 0x00100030, 1439 0x09804506, 0x00100030, 0x09804507, 0x00100030,
1440 0x00000000, 0x00000000, 0x00000000, 0x00000000, 1440 0x00000000, 0x00000000, 0x00000000, 0x00000000,
@@ -1645,7 +1645,7 @@ const u32 b43_ntab_framestruct[] = {
1645 0x00000000, 0x00000000, 0x00000000, 0x00000000, 1645 0x00000000, 0x00000000, 0x00000000, 0x00000000,
1646}; 1646};
1647 1647
1648const u32 b43_ntab_gainctl0[] = { 1648static const u32 b43_ntab_gainctl0[] = {
1649 0x007F003F, 0x007E013F, 0x007D023E, 0x007C033E, 1649 0x007F003F, 0x007E013F, 0x007D023E, 0x007C033E,
1650 0x007B043D, 0x007A053D, 0x0079063C, 0x0078073C, 1650 0x007B043D, 0x007A053D, 0x0079063C, 0x0078073C,
1651 0x0077083B, 0x0076093B, 0x00750A3A, 0x00740B3A, 1651 0x0077083B, 0x0076093B, 0x00750A3A, 0x00740B3A,
@@ -1680,7 +1680,7 @@ const u32 b43_ntab_gainctl0[] = {
1680 0x00030C01, 0x00020D01, 0x00010E00, 0x00000F00, 1680 0x00030C01, 0x00020D01, 0x00010E00, 0x00000F00,
1681}; 1681};
1682 1682
1683const u32 b43_ntab_gainctl1[] = { 1683static const u32 b43_ntab_gainctl1[] = {
1684 0x007F003F, 0x007E013F, 0x007D023E, 0x007C033E, 1684 0x007F003F, 0x007E013F, 0x007D023E, 0x007C033E,
1685 0x007B043D, 0x007A053D, 0x0079063C, 0x0078073C, 1685 0x007B043D, 0x007A053D, 0x0079063C, 0x0078073C,
1686 0x0077083B, 0x0076093B, 0x00750A3A, 0x00740B3A, 1686 0x0077083B, 0x0076093B, 0x00750A3A, 0x00740B3A,
@@ -1715,12 +1715,12 @@ const u32 b43_ntab_gainctl1[] = {
1715 0x00030C01, 0x00020D01, 0x00010E00, 0x00000F00, 1715 0x00030C01, 0x00020D01, 0x00010E00, 0x00000F00,
1716}; 1716};
1717 1717
1718const u32 b43_ntab_intlevel[] = { 1718static const u32 b43_ntab_intlevel[] = {
1719 0x00802070, 0x0671188D, 0x0A60192C, 0x0A300E46, 1719 0x00802070, 0x0671188D, 0x0A60192C, 0x0A300E46,
1720 0x00C1188D, 0x080024D2, 0x00000070, 1720 0x00C1188D, 0x080024D2, 0x00000070,
1721}; 1721};
1722 1722
1723const u32 b43_ntab_iqlt0[] = { 1723static const u32 b43_ntab_iqlt0[] = {
1724 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F, 1724 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F,
1725 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F, 1725 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F,
1726 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F, 1726 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F,
@@ -1755,7 +1755,7 @@ const u32 b43_ntab_iqlt0[] = {
1755 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F, 1755 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F,
1756}; 1756};
1757 1757
1758const u32 b43_ntab_iqlt1[] = { 1758static const u32 b43_ntab_iqlt1[] = {
1759 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F, 1759 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F,
1760 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F, 1760 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F,
1761 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F, 1761 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F,
@@ -1790,7 +1790,7 @@ const u32 b43_ntab_iqlt1[] = {
1790 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F, 1790 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F,
1791}; 1791};
1792 1792
1793const u16 b43_ntab_loftlt0[] = { 1793static const u16 b43_ntab_loftlt0[] = {
1794 0x0000, 0x0101, 0x0002, 0x0103, 0x0000, 0x0101, 1794 0x0000, 0x0101, 0x0002, 0x0103, 0x0000, 0x0101,
1795 0x0002, 0x0103, 0x0000, 0x0101, 0x0002, 0x0103, 1795 0x0002, 0x0103, 0x0000, 0x0101, 0x0002, 0x0103,
1796 0x0000, 0x0101, 0x0002, 0x0103, 0x0000, 0x0101, 1796 0x0000, 0x0101, 0x0002, 0x0103, 0x0000, 0x0101,
@@ -1815,7 +1815,7 @@ const u16 b43_ntab_loftlt0[] = {
1815 0x0002, 0x0103, 1815 0x0002, 0x0103,
1816}; 1816};
1817 1817
1818const u16 b43_ntab_loftlt1[] = { 1818static const u16 b43_ntab_loftlt1[] = {
1819 0x0000, 0x0101, 0x0002, 0x0103, 0x0000, 0x0101, 1819 0x0000, 0x0101, 0x0002, 0x0103, 0x0000, 0x0101,
1820 0x0002, 0x0103, 0x0000, 0x0101, 0x0002, 0x0103, 1820 0x0002, 0x0103, 0x0000, 0x0101, 0x0002, 0x0103,
1821 0x0000, 0x0101, 0x0002, 0x0103, 0x0000, 0x0101, 1821 0x0000, 0x0101, 0x0002, 0x0103, 0x0000, 0x0101,
@@ -1840,7 +1840,7 @@ const u16 b43_ntab_loftlt1[] = {
1840 0x0002, 0x0103, 1840 0x0002, 0x0103,
1841}; 1841};
1842 1842
1843const u8 b43_ntab_mcs[] = { 1843static const u8 b43_ntab_mcs[] = {
1844 0x00, 0x08, 0x0A, 0x10, 0x12, 0x19, 0x1A, 0x1C, 1844 0x00, 0x08, 0x0A, 0x10, 0x12, 0x19, 0x1A, 0x1C,
1845 0x40, 0x48, 0x4A, 0x50, 0x52, 0x59, 0x5A, 0x5C, 1845 0x40, 0x48, 0x4A, 0x50, 0x52, 0x59, 0x5A, 0x5C,
1846 0x80, 0x88, 0x8A, 0x90, 0x92, 0x99, 0x9A, 0x9C, 1846 0x80, 0x88, 0x8A, 0x90, 0x92, 0x99, 0x9A, 0x9C,
@@ -1859,7 +1859,7 @@ const u8 b43_ntab_mcs[] = {
1859 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 1859 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1860}; 1860};
1861 1861
1862const u32 b43_ntab_noisevar10[] = { 1862static const u32 b43_ntab_noisevar10[] = {
1863 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, 1863 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D,
1864 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, 1864 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D,
1865 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, 1865 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D,
@@ -1926,7 +1926,7 @@ const u32 b43_ntab_noisevar10[] = {
1926 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, 1926 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D,
1927}; 1927};
1928 1928
1929const u32 b43_ntab_noisevar11[] = { 1929static const u32 b43_ntab_noisevar11[] = {
1930 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, 1930 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D,
1931 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, 1931 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D,
1932 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, 1932 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D,
@@ -1993,7 +1993,7 @@ const u32 b43_ntab_noisevar11[] = {
1993 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, 1993 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D,
1994}; 1994};
1995 1995
1996const u16 b43_ntab_pilot[] = { 1996static const u16 b43_ntab_pilot[] = {
1997 0xFF08, 0xFF08, 0xFF08, 0xFF08, 0xFF08, 0xFF08, 1997 0xFF08, 0xFF08, 0xFF08, 0xFF08, 0xFF08, 0xFF08,
1998 0xFF08, 0xFF08, 0x80D5, 0x80D5, 0x80D5, 0x80D5, 1998 0xFF08, 0xFF08, 0x80D5, 0x80D5, 0x80D5, 0x80D5,
1999 0x80D5, 0x80D5, 0x80D5, 0x80D5, 0xFF0A, 0xFF82, 1999 0x80D5, 0x80D5, 0x80D5, 0x80D5, 0xFF0A, 0xFF82,
@@ -2011,12 +2011,12 @@ const u16 b43_ntab_pilot[] = {
2011 0xF0A0, 0xF028, 0xFFFF, 0xFFFF, 2011 0xF0A0, 0xF028, 0xFFFF, 0xFFFF,
2012}; 2012};
2013 2013
2014const u32 b43_ntab_pilotlt[] = { 2014static const u32 b43_ntab_pilotlt[] = {
2015 0x76540123, 0x62407351, 0x76543201, 0x76540213, 2015 0x76540123, 0x62407351, 0x76543201, 0x76540213,
2016 0x76540123, 0x76430521, 2016 0x76540123, 0x76430521,
2017}; 2017};
2018 2018
2019const u32 b43_ntab_tdi20a0[] = { 2019static const u32 b43_ntab_tdi20a0[] = {
2020 0x00091226, 0x000A1429, 0x000B56AD, 0x000C58B0, 2020 0x00091226, 0x000A1429, 0x000B56AD, 0x000C58B0,
2021 0x000D5AB3, 0x000E9CB6, 0x000F9EBA, 0x0000C13D, 2021 0x000D5AB3, 0x000E9CB6, 0x000F9EBA, 0x0000C13D,
2022 0x00020301, 0x00030504, 0x00040708, 0x0005090B, 2022 0x00020301, 0x00030504, 0x00040708, 0x0005090B,
@@ -2033,7 +2033,7 @@ const u32 b43_ntab_tdi20a0[] = {
2033 0x00000000, 0x00000000, 0x00000000, 2033 0x00000000, 0x00000000, 0x00000000,
2034}; 2034};
2035 2035
2036const u32 b43_ntab_tdi20a1[] = { 2036static const u32 b43_ntab_tdi20a1[] = {
2037 0x00014B26, 0x00028D29, 0x000393AD, 0x00049630, 2037 0x00014B26, 0x00028D29, 0x000393AD, 0x00049630,
2038 0x0005D833, 0x0006DA36, 0x00099C3A, 0x000A9E3D, 2038 0x0005D833, 0x0006DA36, 0x00099C3A, 0x000A9E3D,
2039 0x000BC081, 0x000CC284, 0x000DC488, 0x000F068B, 2039 0x000BC081, 0x000CC284, 0x000DC488, 0x000F068B,
@@ -2050,7 +2050,7 @@ const u32 b43_ntab_tdi20a1[] = {
2050 0x00000000, 0x00000000, 0x00000000, 2050 0x00000000, 0x00000000, 0x00000000,
2051}; 2051};
2052 2052
2053const u32 b43_ntab_tdi40a0[] = { 2053static const u32 b43_ntab_tdi40a0[] = {
2054 0x0011A346, 0x00136CCF, 0x0014F5D9, 0x001641E2, 2054 0x0011A346, 0x00136CCF, 0x0014F5D9, 0x001641E2,
2055 0x0017CB6B, 0x00195475, 0x001B2383, 0x001CAD0C, 2055 0x0017CB6B, 0x00195475, 0x001B2383, 0x001CAD0C,
2056 0x001E7616, 0x0000821F, 0x00020BA8, 0x0003D4B2, 2056 0x001E7616, 0x0000821F, 0x00020BA8, 0x0003D4B2,
@@ -2081,7 +2081,7 @@ const u32 b43_ntab_tdi40a0[] = {
2081 0x00000000, 0x00000000, 2081 0x00000000, 0x00000000,
2082}; 2082};
2083 2083
2084const u32 b43_ntab_tdi40a1[] = { 2084static const u32 b43_ntab_tdi40a1[] = {
2085 0x001EDB36, 0x000129CA, 0x0002B353, 0x00047CDD, 2085 0x001EDB36, 0x000129CA, 0x0002B353, 0x00047CDD,
2086 0x0005C8E6, 0x000791EF, 0x00091BF9, 0x000AAA07, 2086 0x0005C8E6, 0x000791EF, 0x00091BF9, 0x000AAA07,
2087 0x000C3391, 0x000DFD1A, 0x00120923, 0x0013D22D, 2087 0x000C3391, 0x000DFD1A, 0x00120923, 0x0013D22D,
@@ -2112,7 +2112,7 @@ const u32 b43_ntab_tdi40a1[] = {
2112 0x00000000, 0x00000000, 2112 0x00000000, 0x00000000,
2113}; 2113};
2114 2114
2115const u32 b43_ntab_tdtrn[] = { 2115static const u32 b43_ntab_tdtrn[] = {
2116 0x061C061C, 0x0050EE68, 0xF592FE36, 0xFE5212F6, 2116 0x061C061C, 0x0050EE68, 0xF592FE36, 0xFE5212F6,
2117 0x00000C38, 0xFE5212F6, 0xF592FE36, 0x0050EE68, 2117 0x00000C38, 0xFE5212F6, 0xF592FE36, 0x0050EE68,
2118 0x061C061C, 0xEE680050, 0xFE36F592, 0x12F6FE52, 2118 0x061C061C, 0xEE680050, 0xFE36F592, 0x12F6FE52,
@@ -2291,7 +2291,7 @@ const u32 b43_ntab_tdtrn[] = {
2291 0xFA58FC00, 0x0B64FC7E, 0x0800F7B6, 0x00F006BE, 2291 0xFA58FC00, 0x0B64FC7E, 0x0800F7B6, 0x00F006BE,
2292}; 2292};
2293 2293
2294const u32 b43_ntab_tmap[] = { 2294static const u32 b43_ntab_tmap[] = {
2295 0x8A88AA80, 0x8AAAAA8A, 0x8A8A8AA8, 0x00000888, 2295 0x8A88AA80, 0x8AAAAA8A, 0x8A8A8AA8, 0x00000888,
2296 0x88000000, 0x8A8A88AA, 0x8AA88888, 0x8888A8A8, 2296 0x88000000, 0x8A8A88AA, 0x8AA88888, 0x8888A8A8,
2297 0xF1111110, 0x11111111, 0x11F11111, 0x00000111, 2297 0xF1111110, 0x11111111, 0x11F11111, 0x00000111,
@@ -2406,6 +2406,483 @@ const u32 b43_ntab_tmap[] = {
2406 0x00000000, 0x00000000, 0x00000000, 0x00000000, 2406 0x00000000, 0x00000000, 0x00000000, 0x00000000,
2407}; 2407};
2408 2408
2409const u32 b43_ntab_tx_gain_rev0_1_2[] = {
2410 0x03cc2b44, 0x03cc2b42, 0x03cc2a44, 0x03cc2a42,
2411 0x03cc2944, 0x03c82b44, 0x03c82b42, 0x03c82a44,
2412 0x03c82a42, 0x03c82944, 0x03c82942, 0x03c82844,
2413 0x03c82842, 0x03c42b44, 0x03c42b42, 0x03c42a44,
2414 0x03c42a42, 0x03c42944, 0x03c42942, 0x03c42844,
2415 0x03c42842, 0x03c42744, 0x03c42742, 0x03c42644,
2416 0x03c42642, 0x03c42544, 0x03c42542, 0x03c42444,
2417 0x03c42442, 0x03c02b44, 0x03c02b42, 0x03c02a44,
2418 0x03c02a42, 0x03c02944, 0x03c02942, 0x03c02844,
2419 0x03c02842, 0x03c02744, 0x03c02742, 0x03b02b44,
2420 0x03b02b42, 0x03b02a44, 0x03b02a42, 0x03b02944,
2421 0x03b02942, 0x03b02844, 0x03b02842, 0x03b02744,
2422 0x03b02742, 0x03b02644, 0x03b02642, 0x03b02544,
2423 0x03b02542, 0x03a02b44, 0x03a02b42, 0x03a02a44,
2424 0x03a02a42, 0x03a02944, 0x03a02942, 0x03a02844,
2425 0x03a02842, 0x03a02744, 0x03a02742, 0x03902b44,
2426 0x03902b42, 0x03902a44, 0x03902a42, 0x03902944,
2427 0x03902942, 0x03902844, 0x03902842, 0x03902744,
2428 0x03902742, 0x03902644, 0x03902642, 0x03902544,
2429 0x03902542, 0x03802b44, 0x03802b42, 0x03802a44,
2430 0x03802a42, 0x03802944, 0x03802942, 0x03802844,
2431 0x03802842, 0x03802744, 0x03802742, 0x03802644,
2432 0x03802642, 0x03802544, 0x03802542, 0x03802444,
2433 0x03802442, 0x03802344, 0x03802342, 0x03802244,
2434 0x03802242, 0x03802144, 0x03802142, 0x03802044,
2435 0x03802042, 0x03801f44, 0x03801f42, 0x03801e44,
2436 0x03801e42, 0x03801d44, 0x03801d42, 0x03801c44,
2437 0x03801c42, 0x03801b44, 0x03801b42, 0x03801a44,
2438 0x03801a42, 0x03801944, 0x03801942, 0x03801844,
2439 0x03801842, 0x03801744, 0x03801742, 0x03801644,
2440 0x03801642, 0x03801544, 0x03801542, 0x03801444,
2441 0x03801442, 0x03801344, 0x03801342, 0x00002b00,
2442};
2443
2444const u32 b43_ntab_tx_gain_rev3plus_2ghz[] = {
2445 0x1f410044, 0x1f410042, 0x1f410040, 0x1f41003e,
2446 0x1f41003c, 0x1f41003b, 0x1f410039, 0x1f410037,
2447 0x1e410044, 0x1e410042, 0x1e410040, 0x1e41003e,
2448 0x1e41003c, 0x1e41003b, 0x1e410039, 0x1e410037,
2449 0x1d410044, 0x1d410042, 0x1d410040, 0x1d41003e,
2450 0x1d41003c, 0x1d41003b, 0x1d410039, 0x1d410037,
2451 0x1c410044, 0x1c410042, 0x1c410040, 0x1c41003e,
2452 0x1c41003c, 0x1c41003b, 0x1c410039, 0x1c410037,
2453 0x1b410044, 0x1b410042, 0x1b410040, 0x1b41003e,
2454 0x1b41003c, 0x1b41003b, 0x1b410039, 0x1b410037,
2455 0x1a410044, 0x1a410042, 0x1a410040, 0x1a41003e,
2456 0x1a41003c, 0x1a41003b, 0x1a410039, 0x1a410037,
2457 0x19410044, 0x19410042, 0x19410040, 0x1941003e,
2458 0x1941003c, 0x1941003b, 0x19410039, 0x19410037,
2459 0x18410044, 0x18410042, 0x18410040, 0x1841003e,
2460 0x1841003c, 0x1841003b, 0x18410039, 0x18410037,
2461 0x17410044, 0x17410042, 0x17410040, 0x1741003e,
2462 0x1741003c, 0x1741003b, 0x17410039, 0x17410037,
2463 0x16410044, 0x16410042, 0x16410040, 0x1641003e,
2464 0x1641003c, 0x1641003b, 0x16410039, 0x16410037,
2465 0x15410044, 0x15410042, 0x15410040, 0x1541003e,
2466 0x1541003c, 0x1541003b, 0x15410039, 0x15410037,
2467 0x14410044, 0x14410042, 0x14410040, 0x1441003e,
2468 0x1441003c, 0x1441003b, 0x14410039, 0x14410037,
2469 0x13410044, 0x13410042, 0x13410040, 0x1341003e,
2470 0x1341003c, 0x1341003b, 0x13410039, 0x13410037,
2471 0x12410044, 0x12410042, 0x12410040, 0x1241003e,
2472 0x1241003c, 0x1241003b, 0x12410039, 0x12410037,
2473 0x11410044, 0x11410042, 0x11410040, 0x1141003e,
2474 0x1141003c, 0x1141003b, 0x11410039, 0x11410037,
2475 0x10410044, 0x10410042, 0x10410040, 0x1041003e,
2476 0x1041003c, 0x1041003b, 0x10410039, 0x10410037,
2477};
2478
2479const u32 b43_ntab_tx_gain_rev3_5ghz[] = {
2480 0xcff70044, 0xcff70042, 0xcff70040, 0xcff7003e,
2481 0xcff7003c, 0xcff7003b, 0xcff70039, 0xcff70037,
2482 0xcef70044, 0xcef70042, 0xcef70040, 0xcef7003e,
2483 0xcef7003c, 0xcef7003b, 0xcef70039, 0xcef70037,
2484 0xcdf70044, 0xcdf70042, 0xcdf70040, 0xcdf7003e,
2485 0xcdf7003c, 0xcdf7003b, 0xcdf70039, 0xcdf70037,
2486 0xccf70044, 0xccf70042, 0xccf70040, 0xccf7003e,
2487 0xccf7003c, 0xccf7003b, 0xccf70039, 0xccf70037,
2488 0xcbf70044, 0xcbf70042, 0xcbf70040, 0xcbf7003e,
2489 0xcbf7003c, 0xcbf7003b, 0xcbf70039, 0xcbf70037,
2490 0xcaf70044, 0xcaf70042, 0xcaf70040, 0xcaf7003e,
2491 0xcaf7003c, 0xcaf7003b, 0xcaf70039, 0xcaf70037,
2492 0xc9f70044, 0xc9f70042, 0xc9f70040, 0xc9f7003e,
2493 0xc9f7003c, 0xc9f7003b, 0xc9f70039, 0xc9f70037,
2494 0xc8f70044, 0xc8f70042, 0xc8f70040, 0xc8f7003e,
2495 0xc8f7003c, 0xc8f7003b, 0xc8f70039, 0xc8f70037,
2496 0xc7f70044, 0xc7f70042, 0xc7f70040, 0xc7f7003e,
2497 0xc7f7003c, 0xc7f7003b, 0xc7f70039, 0xc7f70037,
2498 0xc6f70044, 0xc6f70042, 0xc6f70040, 0xc6f7003e,
2499 0xc6f7003c, 0xc6f7003b, 0xc6f70039, 0xc6f70037,
2500 0xc5f70044, 0xc5f70042, 0xc5f70040, 0xc5f7003e,
2501 0xc5f7003c, 0xc5f7003b, 0xc5f70039, 0xc5f70037,
2502 0xc4f70044, 0xc4f70042, 0xc4f70040, 0xc4f7003e,
2503 0xc4f7003c, 0xc4f7003b, 0xc4f70039, 0xc4f70037,
2504 0xc3f70044, 0xc3f70042, 0xc3f70040, 0xc3f7003e,
2505 0xc3f7003c, 0xc3f7003b, 0xc3f70039, 0xc3f70037,
2506 0xc2f70044, 0xc2f70042, 0xc2f70040, 0xc2f7003e,
2507 0xc2f7003c, 0xc2f7003b, 0xc2f70039, 0xc2f70037,
2508 0xc1f70044, 0xc1f70042, 0xc1f70040, 0xc1f7003e,
2509 0xc1f7003c, 0xc1f7003b, 0xc1f70039, 0xc1f70037,
2510 0xc0f70044, 0xc0f70042, 0xc0f70040, 0xc0f7003e,
2511 0xc0f7003c, 0xc0f7003b, 0xc0f70039, 0xc0f70037,
2512};
2513
2514const u32 b43_ntab_tx_gain_rev4_5ghz[] = {
2515 0x2ff20044, 0x2ff20042, 0x2ff20040, 0x2ff2003e,
2516 0x2ff2003c, 0x2ff2003b, 0x2ff20039, 0x2ff20037,
2517 0x2ef20044, 0x2ef20042, 0x2ef20040, 0x2ef2003e,
2518 0x2ef2003c, 0x2ef2003b, 0x2ef20039, 0x2ef20037,
2519 0x2df20044, 0x2df20042, 0x2df20040, 0x2df2003e,
2520 0x2df2003c, 0x2df2003b, 0x2df20039, 0x2df20037,
2521 0x2cf20044, 0x2cf20042, 0x2cf20040, 0x2cf2003e,
2522 0x2cf2003c, 0x2cf2003b, 0x2cf20039, 0x2cf20037,
2523 0x2bf20044, 0x2bf20042, 0x2bf20040, 0x2bf2003e,
2524 0x2bf2003c, 0x2bf2003b, 0x2bf20039, 0x2bf20037,
2525 0x2af20044, 0x2af20042, 0x2af20040, 0x2af2003e,
2526 0x2af2003c, 0x2af2003b, 0x2af20039, 0x2af20037,
2527 0x29f20044, 0x29f20042, 0x29f20040, 0x29f2003e,
2528 0x29f2003c, 0x29f2003b, 0x29f20039, 0x29f20037,
2529 0x28f20044, 0x28f20042, 0x28f20040, 0x28f2003e,
2530 0x28f2003c, 0x28f2003b, 0x28f20039, 0x28f20037,
2531 0x27f20044, 0x27f20042, 0x27f20040, 0x27f2003e,
2532 0x27f2003c, 0x27f2003b, 0x27f20039, 0x27f20037,
2533 0x26f20044, 0x26f20042, 0x26f20040, 0x26f2003e,
2534 0x26f2003c, 0x26f2003b, 0x26f20039, 0x26f20037,
2535 0x25f20044, 0x25f20042, 0x25f20040, 0x25f2003e,
2536 0x25f2003c, 0x25f2003b, 0x25f20039, 0x25f20037,
2537 0x24f20044, 0x24f20042, 0x24f20040, 0x24f2003e,
2538 0x24f2003c, 0x24f2003b, 0x24f20039, 0x24f20038,
2539 0x23f20041, 0x23f20040, 0x23f2003f, 0x23f2003e,
2540 0x23f2003c, 0x23f2003b, 0x23f20039, 0x23f20037,
2541 0x22f20044, 0x22f20042, 0x22f20040, 0x22f2003e,
2542 0x22f2003c, 0x22f2003b, 0x22f20039, 0x22f20037,
2543 0x21f20044, 0x21f20042, 0x21f20040, 0x21f2003e,
2544 0x21f2003c, 0x21f2003b, 0x21f20039, 0x21f20037,
2545 0x20d20043, 0x20d20041, 0x20d2003e, 0x20d2003c,
2546 0x20d2003a, 0x20d20038, 0x20d20036, 0x20d20034,
2547};
2548
2549const u32 b43_ntab_tx_gain_rev5plus_5ghz[] = {
2550 0x0f62004a, 0x0f620048, 0x0f620046, 0x0f620044,
2551 0x0f620042, 0x0f620040, 0x0f62003e, 0x0f62003c,
2552 0x0e620044, 0x0e620042, 0x0e620040, 0x0e62003e,
2553 0x0e62003c, 0x0e62003d, 0x0e62003b, 0x0e62003a,
2554 0x0d620043, 0x0d620041, 0x0d620040, 0x0d62003e,
2555 0x0d62003d, 0x0d62003c, 0x0d62003b, 0x0d62003a,
2556 0x0c620041, 0x0c620040, 0x0c62003f, 0x0c62003e,
2557 0x0c62003c, 0x0c62003b, 0x0c620039, 0x0c620037,
2558 0x0b620046, 0x0b620044, 0x0b620042, 0x0b620040,
2559 0x0b62003e, 0x0b62003c, 0x0b62003b, 0x0b62003a,
2560 0x0a620041, 0x0a620040, 0x0a62003e, 0x0a62003c,
2561 0x0a62003b, 0x0a62003a, 0x0a620039, 0x0a620038,
2562 0x0962003e, 0x0962003d, 0x0962003c, 0x0962003b,
2563 0x09620039, 0x09620037, 0x09620035, 0x09620033,
2564 0x08620044, 0x08620042, 0x08620040, 0x0862003e,
2565 0x0862003c, 0x0862003b, 0x0862003a, 0x08620039,
2566 0x07620043, 0x07620042, 0x07620040, 0x0762003f,
2567 0x0762003d, 0x0762003b, 0x0762003a, 0x07620039,
2568 0x0662003e, 0x0662003d, 0x0662003c, 0x0662003b,
2569 0x06620039, 0x06620037, 0x06620035, 0x06620033,
2570 0x05620046, 0x05620044, 0x05620042, 0x05620040,
2571 0x0562003e, 0x0562003c, 0x0562003b, 0x05620039,
2572 0x04620044, 0x04620042, 0x04620040, 0x0462003e,
2573 0x0462003c, 0x0462003b, 0x04620039, 0x04620038,
2574 0x0362003c, 0x0362003b, 0x0362003a, 0x03620039,
2575 0x03620038, 0x03620037, 0x03620035, 0x03620033,
2576 0x0262004c, 0x0262004a, 0x02620048, 0x02620047,
2577 0x02620046, 0x02620044, 0x02620043, 0x02620042,
2578 0x0162004a, 0x01620048, 0x01620046, 0x01620044,
2579 0x01620043, 0x01620042, 0x01620041, 0x01620040,
2580 0x00620042, 0x00620040, 0x0062003e, 0x0062003c,
2581 0x0062003b, 0x00620039, 0x00620037, 0x00620035,
2582};
2583
2584const u32 txpwrctrl_tx_gain_ipa[] = {
2585 0x5ff7002d, 0x5ff7002b, 0x5ff7002a, 0x5ff70029,
2586 0x5ff70028, 0x5ff70027, 0x5ff70026, 0x5ff70025,
2587 0x5ef7002d, 0x5ef7002b, 0x5ef7002a, 0x5ef70029,
2588 0x5ef70028, 0x5ef70027, 0x5ef70026, 0x5ef70025,
2589 0x5df7002d, 0x5df7002b, 0x5df7002a, 0x5df70029,
2590 0x5df70028, 0x5df70027, 0x5df70026, 0x5df70025,
2591 0x5cf7002d, 0x5cf7002b, 0x5cf7002a, 0x5cf70029,
2592 0x5cf70028, 0x5cf70027, 0x5cf70026, 0x5cf70025,
2593 0x5bf7002d, 0x5bf7002b, 0x5bf7002a, 0x5bf70029,
2594 0x5bf70028, 0x5bf70027, 0x5bf70026, 0x5bf70025,
2595 0x5af7002d, 0x5af7002b, 0x5af7002a, 0x5af70029,
2596 0x5af70028, 0x5af70027, 0x5af70026, 0x5af70025,
2597 0x59f7002d, 0x59f7002b, 0x59f7002a, 0x59f70029,
2598 0x59f70028, 0x59f70027, 0x59f70026, 0x59f70025,
2599 0x58f7002d, 0x58f7002b, 0x58f7002a, 0x58f70029,
2600 0x58f70028, 0x58f70027, 0x58f70026, 0x58f70025,
2601 0x57f7002d, 0x57f7002b, 0x57f7002a, 0x57f70029,
2602 0x57f70028, 0x57f70027, 0x57f70026, 0x57f70025,
2603 0x56f7002d, 0x56f7002b, 0x56f7002a, 0x56f70029,
2604 0x56f70028, 0x56f70027, 0x56f70026, 0x56f70025,
2605 0x55f7002d, 0x55f7002b, 0x55f7002a, 0x55f70029,
2606 0x55f70028, 0x55f70027, 0x55f70026, 0x55f70025,
2607 0x54f7002d, 0x54f7002b, 0x54f7002a, 0x54f70029,
2608 0x54f70028, 0x54f70027, 0x54f70026, 0x54f70025,
2609 0x53f7002d, 0x53f7002b, 0x53f7002a, 0x53f70029,
2610 0x53f70028, 0x53f70027, 0x53f70026, 0x53f70025,
2611 0x52f7002d, 0x52f7002b, 0x52f7002a, 0x52f70029,
2612 0x52f70028, 0x52f70027, 0x52f70026, 0x52f70025,
2613 0x51f7002d, 0x51f7002b, 0x51f7002a, 0x51f70029,
2614 0x51f70028, 0x51f70027, 0x51f70026, 0x51f70025,
2615 0x50f7002d, 0x50f7002b, 0x50f7002a, 0x50f70029,
2616 0x50f70028, 0x50f70027, 0x50f70026, 0x50f70025,
2617};
2618
2619const u32 txpwrctrl_tx_gain_ipa_rev5[] = {
2620 0x1ff7002d, 0x1ff7002b, 0x1ff7002a, 0x1ff70029,
2621 0x1ff70028, 0x1ff70027, 0x1ff70026, 0x1ff70025,
2622 0x1ef7002d, 0x1ef7002b, 0x1ef7002a, 0x1ef70029,
2623 0x1ef70028, 0x1ef70027, 0x1ef70026, 0x1ef70025,
2624 0x1df7002d, 0x1df7002b, 0x1df7002a, 0x1df70029,
2625 0x1df70028, 0x1df70027, 0x1df70026, 0x1df70025,
2626 0x1cf7002d, 0x1cf7002b, 0x1cf7002a, 0x1cf70029,
2627 0x1cf70028, 0x1cf70027, 0x1cf70026, 0x1cf70025,
2628 0x1bf7002d, 0x1bf7002b, 0x1bf7002a, 0x1bf70029,
2629 0x1bf70028, 0x1bf70027, 0x1bf70026, 0x1bf70025,
2630 0x1af7002d, 0x1af7002b, 0x1af7002a, 0x1af70029,
2631 0x1af70028, 0x1af70027, 0x1af70026, 0x1af70025,
2632 0x19f7002d, 0x19f7002b, 0x19f7002a, 0x19f70029,
2633 0x19f70028, 0x19f70027, 0x19f70026, 0x19f70025,
2634 0x18f7002d, 0x18f7002b, 0x18f7002a, 0x18f70029,
2635 0x18f70028, 0x18f70027, 0x18f70026, 0x18f70025,
2636 0x17f7002d, 0x17f7002b, 0x17f7002a, 0x17f70029,
2637 0x17f70028, 0x17f70027, 0x17f70026, 0x17f70025,
2638 0x16f7002d, 0x16f7002b, 0x16f7002a, 0x16f70029,
2639 0x16f70028, 0x16f70027, 0x16f70026, 0x16f70025,
2640 0x15f7002d, 0x15f7002b, 0x15f7002a, 0x15f70029,
2641 0x15f70028, 0x15f70027, 0x15f70026, 0x15f70025,
2642 0x14f7002d, 0x14f7002b, 0x14f7002a, 0x14f70029,
2643 0x14f70028, 0x14f70027, 0x14f70026, 0x14f70025,
2644 0x13f7002d, 0x13f7002b, 0x13f7002a, 0x13f70029,
2645 0x13f70028, 0x13f70027, 0x13f70026, 0x13f70025,
2646 0x12f7002d, 0x12f7002b, 0x12f7002a, 0x12f70029,
2647 0x12f70028, 0x12f70027, 0x12f70026, 0x12f70025,
2648 0x11f7002d, 0x11f7002b, 0x11f7002a, 0x11f70029,
2649 0x11f70028, 0x11f70027, 0x11f70026, 0x11f70025,
2650 0x10f7002d, 0x10f7002b, 0x10f7002a, 0x10f70029,
2651 0x10f70028, 0x10f70027, 0x10f70026, 0x10f70025,
2652};
2653
2654const u32 txpwrctrl_tx_gain_ipa_rev6[] = {
2655 0x0ff7002d, 0x0ff7002b, 0x0ff7002a, 0x0ff70029,
2656 0x0ff70028, 0x0ff70027, 0x0ff70026, 0x0ff70025,
2657 0x0ef7002d, 0x0ef7002b, 0x0ef7002a, 0x0ef70029,
2658 0x0ef70028, 0x0ef70027, 0x0ef70026, 0x0ef70025,
2659 0x0df7002d, 0x0df7002b, 0x0df7002a, 0x0df70029,
2660 0x0df70028, 0x0df70027, 0x0df70026, 0x0df70025,
2661 0x0cf7002d, 0x0cf7002b, 0x0cf7002a, 0x0cf70029,
2662 0x0cf70028, 0x0cf70027, 0x0cf70026, 0x0cf70025,
2663 0x0bf7002d, 0x0bf7002b, 0x0bf7002a, 0x0bf70029,
2664 0x0bf70028, 0x0bf70027, 0x0bf70026, 0x0bf70025,
2665 0x0af7002d, 0x0af7002b, 0x0af7002a, 0x0af70029,
2666 0x0af70028, 0x0af70027, 0x0af70026, 0x0af70025,
2667 0x09f7002d, 0x09f7002b, 0x09f7002a, 0x09f70029,
2668 0x09f70028, 0x09f70027, 0x09f70026, 0x09f70025,
2669 0x08f7002d, 0x08f7002b, 0x08f7002a, 0x08f70029,
2670 0x08f70028, 0x08f70027, 0x08f70026, 0x08f70025,
2671 0x07f7002d, 0x07f7002b, 0x07f7002a, 0x07f70029,
2672 0x07f70028, 0x07f70027, 0x07f70026, 0x07f70025,
2673 0x06f7002d, 0x06f7002b, 0x06f7002a, 0x06f70029,
2674 0x06f70028, 0x06f70027, 0x06f70026, 0x06f70025,
2675 0x05f7002d, 0x05f7002b, 0x05f7002a, 0x05f70029,
2676 0x05f70028, 0x05f70027, 0x05f70026, 0x05f70025,
2677 0x04f7002d, 0x04f7002b, 0x04f7002a, 0x04f70029,
2678 0x04f70028, 0x04f70027, 0x04f70026, 0x04f70025,
2679 0x03f7002d, 0x03f7002b, 0x03f7002a, 0x03f70029,
2680 0x03f70028, 0x03f70027, 0x03f70026, 0x03f70025,
2681 0x02f7002d, 0x02f7002b, 0x02f7002a, 0x02f70029,
2682 0x02f70028, 0x02f70027, 0x02f70026, 0x02f70025,
2683 0x01f7002d, 0x01f7002b, 0x01f7002a, 0x01f70029,
2684 0x01f70028, 0x01f70027, 0x01f70026, 0x01f70025,
2685 0x00f7002d, 0x00f7002b, 0x00f7002a, 0x00f70029,
2686 0x00f70028, 0x00f70027, 0x00f70026, 0x00f70025,
2687};
2688
2689const u32 txpwrctrl_tx_gain_ipa_5g[] = {
2690 0x7ff70035, 0x7ff70033, 0x7ff70032, 0x7ff70031,
2691 0x7ff7002f, 0x7ff7002e, 0x7ff7002d, 0x7ff7002b,
2692 0x7ff7002a, 0x7ff70029, 0x7ff70028, 0x7ff70027,
2693 0x7ff70026, 0x7ff70024, 0x7ff70023, 0x7ff70022,
2694 0x7ef70028, 0x7ef70027, 0x7ef70026, 0x7ef70025,
2695 0x7ef70024, 0x7ef70023, 0x7df70028, 0x7df70027,
2696 0x7df70026, 0x7df70025, 0x7df70024, 0x7df70023,
2697 0x7df70022, 0x7cf70029, 0x7cf70028, 0x7cf70027,
2698 0x7cf70026, 0x7cf70025, 0x7cf70023, 0x7cf70022,
2699 0x7bf70029, 0x7bf70028, 0x7bf70026, 0x7bf70025,
2700 0x7bf70024, 0x7bf70023, 0x7bf70022, 0x7bf70021,
2701 0x7af70029, 0x7af70028, 0x7af70027, 0x7af70026,
2702 0x7af70025, 0x7af70024, 0x7af70023, 0x7af70022,
2703 0x79f70029, 0x79f70028, 0x79f70027, 0x79f70026,
2704 0x79f70025, 0x79f70024, 0x79f70023, 0x79f70022,
2705 0x78f70029, 0x78f70028, 0x78f70027, 0x78f70026,
2706 0x78f70025, 0x78f70024, 0x78f70023, 0x78f70022,
2707 0x77f70029, 0x77f70028, 0x77f70027, 0x77f70026,
2708 0x77f70025, 0x77f70024, 0x77f70023, 0x77f70022,
2709 0x76f70029, 0x76f70028, 0x76f70027, 0x76f70026,
2710 0x76f70024, 0x76f70023, 0x76f70022, 0x76f70021,
2711 0x75f70029, 0x75f70028, 0x75f70027, 0x75f70026,
2712 0x75f70025, 0x75f70024, 0x75f70023, 0x74f70029,
2713 0x74f70028, 0x74f70026, 0x74f70025, 0x74f70024,
2714 0x74f70023, 0x74f70022, 0x73f70029, 0x73f70027,
2715 0x73f70026, 0x73f70025, 0x73f70024, 0x73f70023,
2716 0x73f70022, 0x72f70028, 0x72f70027, 0x72f70026,
2717 0x72f70025, 0x72f70024, 0x72f70023, 0x72f70022,
2718 0x71f70028, 0x71f70027, 0x71f70026, 0x71f70025,
2719 0x71f70024, 0x71f70023, 0x70f70028, 0x70f70027,
2720 0x70f70026, 0x70f70024, 0x70f70023, 0x70f70022,
2721 0x70f70021, 0x70f70020, 0x70f70020, 0x70f7001f,
2722};
2723
2724const u16 tbl_iqcal_gainparams[2][9][8] = {
2725 {
2726 { 0x000, 0, 0, 2, 0x69, 0x69, 0x69, 0x69 },
2727 { 0x700, 7, 0, 0, 0x69, 0x69, 0x69, 0x69 },
2728 { 0x710, 7, 1, 0, 0x68, 0x68, 0x68, 0x68 },
2729 { 0x720, 7, 2, 0, 0x67, 0x67, 0x67, 0x67 },
2730 { 0x730, 7, 3, 0, 0x66, 0x66, 0x66, 0x66 },
2731 { 0x740, 7, 4, 0, 0x65, 0x65, 0x65, 0x65 },
2732 { 0x741, 7, 4, 1, 0x65, 0x65, 0x65, 0x65 },
2733 { 0x742, 7, 4, 2, 0x65, 0x65, 0x65, 0x65 },
2734 { 0x743, 7, 4, 3, 0x65, 0x65, 0x65, 0x65 }
2735 },
2736 {
2737 { 0x000, 7, 0, 0, 0x79, 0x79, 0x79, 0x79 },
2738 { 0x700, 7, 0, 0, 0x79, 0x79, 0x79, 0x79 },
2739 { 0x710, 7, 1, 0, 0x79, 0x79, 0x79, 0x79 },
2740 { 0x720, 7, 2, 0, 0x78, 0x78, 0x78, 0x78 },
2741 { 0x730, 7, 3, 0, 0x78, 0x78, 0x78, 0x78 },
2742 { 0x740, 7, 4, 0, 0x78, 0x78, 0x78, 0x78 },
2743 { 0x741, 7, 4, 1, 0x78, 0x78, 0x78, 0x78 },
2744 { 0x742, 7, 4, 2, 0x78, 0x78, 0x78, 0x78 },
2745 { 0x743, 7, 4, 3, 0x78, 0x78, 0x78, 0x78 }
2746 }
2747};
2748
2749const struct nphy_txiqcal_ladder ladder_lo[] = {
2750 { 3, 0 },
2751 { 4, 0 },
2752 { 6, 0 },
2753 { 9, 0 },
2754 { 13, 0 },
2755 { 18, 0 },
2756 { 25, 0 },
2757 { 25, 1 },
2758 { 25, 2 },
2759 { 25, 3 },
2760 { 25, 4 },
2761 { 25, 5 },
2762 { 25, 6 },
2763 { 25, 7 },
2764 { 35, 7 },
2765 { 50, 7 },
2766 { 71, 7 },
2767 { 100, 7 }
2768};
2769
2770const struct nphy_txiqcal_ladder ladder_iq[] = {
2771 { 3, 0 },
2772 { 4, 0 },
2773 { 6, 0 },
2774 { 9, 0 },
2775 { 13, 0 },
2776 { 18, 0 },
2777 { 25, 0 },
2778 { 35, 0 },
2779 { 50, 0 },
2780 { 71, 0 },
2781 { 100, 0 },
2782 { 100, 1 },
2783 { 100, 2 },
2784 { 100, 3 },
2785 { 100, 4 },
2786 { 100, 5 },
2787 { 100, 6 },
2788 { 100, 7 }
2789};
2790
2791const u16 loscale[] = {
2792 256, 256, 271, 271,
2793 287, 256, 256, 271,
2794 271, 287, 287, 304,
2795 304, 256, 256, 271,
2796 271, 287, 287, 304,
2797 304, 322, 322, 341,
2798 341, 362, 362, 383,
2799 383, 256, 256, 271,
2800 271, 287, 287, 304,
2801 304, 322, 322, 256,
2802 256, 271, 271, 287,
2803 287, 304, 304, 322,
2804 322, 341, 341, 362,
2805 362, 256, 256, 271,
2806 271, 287, 287, 304,
2807 304, 322, 322, 256,
2808 256, 271, 271, 287,
2809 287, 304, 304, 322,
2810 322, 341, 341, 362,
2811 362, 256, 256, 271,
2812 271, 287, 287, 304,
2813 304, 322, 322, 341,
2814 341, 362, 362, 383,
2815 383, 406, 406, 430,
2816 430, 455, 455, 482,
2817 482, 511, 511, 541,
2818 541, 573, 573, 607,
2819 607, 643, 643, 681,
2820 681, 722, 722, 764,
2821 764, 810, 810, 858,
2822 858, 908, 908, 962,
2823 962, 1019, 1019, 256
2824};
2825
2826const u16 tbl_tx_iqlo_cal_loft_ladder_40[] = {
2827 0x0200, 0x0300, 0x0400, 0x0700,
2828 0x0900, 0x0c00, 0x1200, 0x1201,
2829 0x1202, 0x1203, 0x1204, 0x1205,
2830 0x1206, 0x1207, 0x1907, 0x2307,
2831 0x3207, 0x4707
2832};
2833
2834const u16 tbl_tx_iqlo_cal_loft_ladder_20[] = {
2835 0x0300, 0x0500, 0x0700, 0x0900,
2836 0x0d00, 0x1100, 0x1900, 0x1901,
2837 0x1902, 0x1903, 0x1904, 0x1905,
2838 0x1906, 0x1907, 0x2407, 0x3207,
2839 0x4607, 0x6407
2840};
2841
2842const u16 tbl_tx_iqlo_cal_iqimb_ladder_40[] = {
2843 0x0100, 0x0200, 0x0400, 0x0700,
2844 0x0900, 0x0c00, 0x1200, 0x1900,
2845 0x2300, 0x3200, 0x4700, 0x4701,
2846 0x4702, 0x4703, 0x4704, 0x4705,
2847 0x4706, 0x4707
2848};
2849
2850const u16 tbl_tx_iqlo_cal_iqimb_ladder_20[] = {
2851 0x0200, 0x0300, 0x0600, 0x0900,
2852 0x0d00, 0x1100, 0x1900, 0x2400,
2853 0x3200, 0x4600, 0x6400, 0x6401,
2854 0x6402, 0x6403, 0x6404, 0x6405,
2855 0x6406, 0x6407
2856};
2857
2858const u16 tbl_tx_iqlo_cal_startcoefs_nphyrev3[B43_NTAB_TX_IQLO_CAL_STARTCOEFS_REV3] = { };
2859
2860const u16 tbl_tx_iqlo_cal_startcoefs[B43_NTAB_TX_IQLO_CAL_STARTCOEFS] = { };
2861
2862const u16 tbl_tx_iqlo_cal_cmds_recal_nphyrev3[] = {
2863 0x8423, 0x8323, 0x8073, 0x8256,
2864 0x8045, 0x8223, 0x9423, 0x9323,
2865 0x9073, 0x9256, 0x9045, 0x9223
2866};
2867
2868const u16 tbl_tx_iqlo_cal_cmds_recal[] = {
2869 0x8101, 0x8253, 0x8053, 0x8234,
2870 0x8034, 0x9101, 0x9253, 0x9053,
2871 0x9234, 0x9034
2872};
2873
2874const u16 tbl_tx_iqlo_cal_cmds_fullcal[] = {
2875 0x8123, 0x8264, 0x8086, 0x8245,
2876 0x8056, 0x9123, 0x9264, 0x9086,
2877 0x9245, 0x9056
2878};
2879
2880const u16 tbl_tx_iqlo_cal_cmds_fullcal_nphyrev3[] = {
2881 0x8434, 0x8334, 0x8084, 0x8267,
2882 0x8056, 0x8234, 0x9434, 0x9334,
2883 0x9084, 0x9267, 0x9056, 0x9234
2884};
2885
2409static inline void assert_ntab_array_sizes(void) 2886static inline void assert_ntab_array_sizes(void)
2410{ 2887{
2411#undef check 2888#undef check
@@ -2474,3 +2951,51 @@ void b43_ntab_write(struct b43_wldev *dev, u32 offset, u32 value)
2474 /* Some compiletime assertions... */ 2951 /* Some compiletime assertions... */
2475 assert_ntab_array_sizes(); 2952 assert_ntab_array_sizes();
2476} 2953}
2954
2955#define ntab_upload(dev, offset, data) do { \
2956 unsigned int i; \
2957 for (i = 0; i < (offset##_SIZE); i++) \
2958 b43_ntab_write(dev, (offset) + i, (data)[i]); \
2959 } while (0)
2960
2961void b43_nphy_rev0_1_2_tables_init(struct b43_wldev *dev)
2962{
2963 /* Static tables */
2964 ntab_upload(dev, B43_NTAB_FRAMESTRUCT, b43_ntab_framestruct);
2965 ntab_upload(dev, B43_NTAB_FRAMELT, b43_ntab_framelookup);
2966 ntab_upload(dev, B43_NTAB_TMAP, b43_ntab_tmap);
2967 ntab_upload(dev, B43_NTAB_TDTRN, b43_ntab_tdtrn);
2968 ntab_upload(dev, B43_NTAB_INTLEVEL, b43_ntab_intlevel);
2969 ntab_upload(dev, B43_NTAB_PILOT, b43_ntab_pilot);
2970 ntab_upload(dev, B43_NTAB_PILOTLT, b43_ntab_pilotlt);
2971 ntab_upload(dev, B43_NTAB_TDI20A0, b43_ntab_tdi20a0);
2972 ntab_upload(dev, B43_NTAB_TDI20A1, b43_ntab_tdi20a1);
2973 ntab_upload(dev, B43_NTAB_TDI40A0, b43_ntab_tdi40a0);
2974 ntab_upload(dev, B43_NTAB_TDI40A1, b43_ntab_tdi40a1);
2975 ntab_upload(dev, B43_NTAB_BDI, b43_ntab_bdi);
2976 ntab_upload(dev, B43_NTAB_CHANEST, b43_ntab_channelest);
2977 ntab_upload(dev, B43_NTAB_MCS, b43_ntab_mcs);
2978
2979 /* Volatile tables */
2980 ntab_upload(dev, B43_NTAB_NOISEVAR10, b43_ntab_noisevar10);
2981 ntab_upload(dev, B43_NTAB_NOISEVAR11, b43_ntab_noisevar11);
2982 ntab_upload(dev, B43_NTAB_C0_ESTPLT, b43_ntab_estimatepowerlt0);
2983 ntab_upload(dev, B43_NTAB_C1_ESTPLT, b43_ntab_estimatepowerlt1);
2984 ntab_upload(dev, B43_NTAB_C0_ADJPLT, b43_ntab_adjustpower0);
2985 ntab_upload(dev, B43_NTAB_C1_ADJPLT, b43_ntab_adjustpower1);
2986 ntab_upload(dev, B43_NTAB_C0_GAINCTL, b43_ntab_gainctl0);
2987 ntab_upload(dev, B43_NTAB_C1_GAINCTL, b43_ntab_gainctl1);
2988 ntab_upload(dev, B43_NTAB_C0_IQLT, b43_ntab_iqlt0);
2989 ntab_upload(dev, B43_NTAB_C1_IQLT, b43_ntab_iqlt1);
2990 ntab_upload(dev, B43_NTAB_C0_LOFEEDTH, b43_ntab_loftlt0);
2991 ntab_upload(dev, B43_NTAB_C1_LOFEEDTH, b43_ntab_loftlt1);
2992}
2993
2994void b43_nphy_rev3plus_tables_init(struct b43_wldev *dev)
2995{
2996 /* Static tables */
2997 /* TODO */
2998
2999 /* Volatile tables */
3000 /* TODO */
3001}
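Note on the ntab_upload() helper added above: it relies on token pasting, reading the matching offset##_SIZE define to bound the loop for whichever table offset is passed in. As an illustrative sketch only (not part of the patch), the b43_ntab_loftlt1 call in b43_nphy_rev0_1_2_tables_init() expands to roughly:

	/* Illustrative expansion, not patch content:
	 * ntab_upload(dev, B43_NTAB_C1_LOFEEDTH, b43_ntab_loftlt1) becomes: */
	do {
		unsigned int i;
		/* B43_NTAB_C1_LOFEEDTH_SIZE is 128 per the header hunk below */
		for (i = 0; i < B43_NTAB_C1_LOFEEDTH_SIZE; i++)
			b43_ntab_write(dev, B43_NTAB_C1_LOFEEDTH + i, b43_ntab_loftlt1[i]);
	} while (0);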
diff --git a/drivers/net/wireless/b43/tables_nphy.h b/drivers/net/wireless/b43/tables_nphy.h
index 4d498b053ec..51636d02f8b 100644
--- a/drivers/net/wireless/b43/tables_nphy.h
+++ b/drivers/net/wireless/b43/tables_nphy.h
@@ -46,6 +46,11 @@ struct b43_nphy_channeltab_entry {
46 46
47struct b43_wldev; 47struct b43_wldev;
48 48
49struct nphy_txiqcal_ladder {
50 u8 percent;
51 u8 g_env;
52};
53
49/* Upload the default register value table. 54/* Upload the default register value table.
50 * If "ghz5" is true, we upload the 5Ghz table. Otherwise the 2.4Ghz 55 * If "ghz5" is true, we upload the 5Ghz table. Otherwise the 2.4Ghz
51 * table is uploaded. If "ignore_uploadflag" is true, we upload any value 56 * table is uploaded. If "ignore_uploadflag" is true, we upload any value
@@ -126,34 +131,46 @@ b43_nphy_get_chantabent(struct b43_wldev *dev, u8 channel);
126#define B43_NTAB_C1_LOFEEDTH B43_NTAB16(0x1B, 0x1C0) /* Local Oscillator Feed Through Lookup Table Core 1 */ 131#define B43_NTAB_C1_LOFEEDTH B43_NTAB16(0x1B, 0x1C0) /* Local Oscillator Feed Through Lookup Table Core 1 */
127#define B43_NTAB_C1_LOFEEDTH_SIZE 128 132#define B43_NTAB_C1_LOFEEDTH_SIZE 128
128 133
129void b43_ntab_write(struct b43_wldev *dev, u32 offset, u32 value); 134#define B43_NTAB_TX_IQLO_CAL_LOFT_LADDER_40_SIZE 18
135#define B43_NTAB_TX_IQLO_CAL_LOFT_LADDER_20_SIZE 18
136#define B43_NTAB_TX_IQLO_CAL_IQIMB_LADDER_40_SIZE 18
137#define B43_NTAB_TX_IQLO_CAL_IQIMB_LADDER_20_SIZE 18
138#define B43_NTAB_TX_IQLO_CAL_STARTCOEFS_REV3 11
139#define B43_NTAB_TX_IQLO_CAL_STARTCOEFS 9
140#define B43_NTAB_TX_IQLO_CAL_CMDS_RECAL_REV3 12
141#define B43_NTAB_TX_IQLO_CAL_CMDS_RECAL 10
142#define B43_NTAB_TX_IQLO_CAL_CMDS_FULLCAL 10
143#define B43_NTAB_TX_IQLO_CAL_CMDS_FULLCAL_REV3 12
130 144
131extern const u8 b43_ntab_adjustpower0[]; 145void b43_ntab_write(struct b43_wldev *dev, u32 offset, u32 value);
132extern const u8 b43_ntab_adjustpower1[];
133extern const u16 b43_ntab_bdi[];
134extern const u32 b43_ntab_channelest[];
135extern const u8 b43_ntab_estimatepowerlt0[];
136extern const u8 b43_ntab_estimatepowerlt1[];
137extern const u8 b43_ntab_framelookup[];
138extern const u32 b43_ntab_framestruct[];
139extern const u32 b43_ntab_gainctl0[];
140extern const u32 b43_ntab_gainctl1[];
141extern const u32 b43_ntab_intlevel[];
142extern const u32 b43_ntab_iqlt0[];
143extern const u32 b43_ntab_iqlt1[];
144extern const u16 b43_ntab_loftlt0[];
145extern const u16 b43_ntab_loftlt1[];
146extern const u8 b43_ntab_mcs[];
147extern const u32 b43_ntab_noisevar10[];
148extern const u32 b43_ntab_noisevar11[];
149extern const u16 b43_ntab_pilot[];
150extern const u32 b43_ntab_pilotlt[];
151extern const u32 b43_ntab_tdi20a0[];
152extern const u32 b43_ntab_tdi20a1[];
153extern const u32 b43_ntab_tdi40a0[];
154extern const u32 b43_ntab_tdi40a1[];
155extern const u32 b43_ntab_tdtrn[];
156extern const u32 b43_ntab_tmap[];
157 146
147void b43_nphy_rev0_1_2_tables_init(struct b43_wldev *dev);
148void b43_nphy_rev3plus_tables_init(struct b43_wldev *dev);
149
150extern const u32 b43_ntab_tx_gain_rev0_1_2[];
151extern const u32 b43_ntab_tx_gain_rev3plus_2ghz[];
152extern const u32 b43_ntab_tx_gain_rev3_5ghz[];
153extern const u32 b43_ntab_tx_gain_rev4_5ghz[];
154extern const u32 b43_ntab_tx_gain_rev5plus_5ghz[];
155
156extern const u32 txpwrctrl_tx_gain_ipa[];
157extern const u32 txpwrctrl_tx_gain_ipa_rev5[];
158extern const u32 txpwrctrl_tx_gain_ipa_rev6[];
159extern const u32 txpwrctrl_tx_gain_ipa_5g[];
160extern const u16 tbl_iqcal_gainparams[2][9][8];
161extern const struct nphy_txiqcal_ladder ladder_lo[];
162extern const struct nphy_txiqcal_ladder ladder_iq[];
163extern const u16 loscale[];
164
165extern const u16 tbl_tx_iqlo_cal_loft_ladder_40[];
166extern const u16 tbl_tx_iqlo_cal_loft_ladder_20[];
167extern const u16 tbl_tx_iqlo_cal_iqimb_ladder_40[];
168extern const u16 tbl_tx_iqlo_cal_iqimb_ladder_20[];
169extern const u16 tbl_tx_iqlo_cal_startcoefs_nphyrev3[];
170extern const u16 tbl_tx_iqlo_cal_startcoefs[];
171extern const u16 tbl_tx_iqlo_cal_cmds_recal_nphyrev3[];
172extern const u16 tbl_tx_iqlo_cal_cmds_recal[];
173extern const u16 tbl_tx_iqlo_cal_cmds_fullcal[];
174extern const u16 tbl_tx_iqlo_cal_cmds_fullcal_nphyrev3[];
158 175
159#endif /* B43_TABLES_NPHY_H_ */ 176#endif /* B43_TABLES_NPHY_H_ */
diff --git a/drivers/net/wireless/b43legacy/main.c b/drivers/net/wireless/b43legacy/main.c
index 4a905b6a886..874a64a6c61 100644
--- a/drivers/net/wireless/b43legacy/main.c
+++ b/drivers/net/wireless/b43legacy/main.c
@@ -61,6 +61,8 @@ MODULE_AUTHOR("Michael Buesch");
61MODULE_LICENSE("GPL"); 61MODULE_LICENSE("GPL");
62 62
63MODULE_FIRMWARE(B43legacy_SUPPORTED_FIRMWARE_ID); 63MODULE_FIRMWARE(B43legacy_SUPPORTED_FIRMWARE_ID);
64MODULE_FIRMWARE("b43legacy/ucode2.fw");
65MODULE_FIRMWARE("b43legacy/ucode4.fw");
64 66
65#if defined(CONFIG_B43LEGACY_DMA) && defined(CONFIG_B43LEGACY_PIO) 67#if defined(CONFIG_B43LEGACY_DMA) && defined(CONFIG_B43LEGACY_PIO)
66static int modparam_pio; 68static int modparam_pio;
@@ -3361,7 +3363,7 @@ err_kfree_lo_control:
3361} 3363}
3362 3364
3363static int b43legacy_op_add_interface(struct ieee80211_hw *hw, 3365static int b43legacy_op_add_interface(struct ieee80211_hw *hw,
3364 struct ieee80211_if_init_conf *conf) 3366 struct ieee80211_vif *vif)
3365{ 3367{
3366 struct b43legacy_wl *wl = hw_to_b43legacy_wl(hw); 3368 struct b43legacy_wl *wl = hw_to_b43legacy_wl(hw);
3367 struct b43legacy_wldev *dev; 3369 struct b43legacy_wldev *dev;
@@ -3370,23 +3372,23 @@ static int b43legacy_op_add_interface(struct ieee80211_hw *hw,
3370 3372
3371 /* TODO: allow WDS/AP devices to coexist */ 3373 /* TODO: allow WDS/AP devices to coexist */
3372 3374
3373 if (conf->type != NL80211_IFTYPE_AP && 3375 if (vif->type != NL80211_IFTYPE_AP &&
3374 conf->type != NL80211_IFTYPE_STATION && 3376 vif->type != NL80211_IFTYPE_STATION &&
3375 conf->type != NL80211_IFTYPE_WDS && 3377 vif->type != NL80211_IFTYPE_WDS &&
3376 conf->type != NL80211_IFTYPE_ADHOC) 3378 vif->type != NL80211_IFTYPE_ADHOC)
3377 return -EOPNOTSUPP; 3379 return -EOPNOTSUPP;
3378 3380
3379 mutex_lock(&wl->mutex); 3381 mutex_lock(&wl->mutex);
3380 if (wl->operating) 3382 if (wl->operating)
3381 goto out_mutex_unlock; 3383 goto out_mutex_unlock;
3382 3384
3383 b43legacydbg(wl, "Adding Interface type %d\n", conf->type); 3385 b43legacydbg(wl, "Adding Interface type %d\n", vif->type);
3384 3386
3385 dev = wl->current_dev; 3387 dev = wl->current_dev;
3386 wl->operating = 1; 3388 wl->operating = 1;
3387 wl->vif = conf->vif; 3389 wl->vif = vif;
3388 wl->if_type = conf->type; 3390 wl->if_type = vif->type;
3389 memcpy(wl->mac_addr, conf->mac_addr, ETH_ALEN); 3391 memcpy(wl->mac_addr, vif->addr, ETH_ALEN);
3390 3392
3391 spin_lock_irqsave(&wl->irq_lock, flags); 3393 spin_lock_irqsave(&wl->irq_lock, flags);
3392 b43legacy_adjust_opmode(dev); 3394 b43legacy_adjust_opmode(dev);
@@ -3403,18 +3405,18 @@ static int b43legacy_op_add_interface(struct ieee80211_hw *hw,
3403} 3405}
3404 3406
3405static void b43legacy_op_remove_interface(struct ieee80211_hw *hw, 3407static void b43legacy_op_remove_interface(struct ieee80211_hw *hw,
3406 struct ieee80211_if_init_conf *conf) 3408 struct ieee80211_vif *vif)
3407{ 3409{
3408 struct b43legacy_wl *wl = hw_to_b43legacy_wl(hw); 3410 struct b43legacy_wl *wl = hw_to_b43legacy_wl(hw);
3409 struct b43legacy_wldev *dev = wl->current_dev; 3411 struct b43legacy_wldev *dev = wl->current_dev;
3410 unsigned long flags; 3412 unsigned long flags;
3411 3413
3412 b43legacydbg(wl, "Removing Interface type %d\n", conf->type); 3414 b43legacydbg(wl, "Removing Interface type %d\n", vif->type);
3413 3415
3414 mutex_lock(&wl->mutex); 3416 mutex_lock(&wl->mutex);
3415 3417
3416 B43legacy_WARN_ON(!wl->operating); 3418 B43legacy_WARN_ON(!wl->operating);
3417 B43legacy_WARN_ON(wl->vif != conf->vif); 3419 B43legacy_WARN_ON(wl->vif != vif);
3418 wl->vif = NULL; 3420 wl->vif = NULL;
3419 3421
3420 wl->operating = 0; 3422 wl->operating = 0;
@@ -3960,7 +3962,7 @@ static struct ssb_driver b43legacy_ssb_driver = {
3960 3962
3961static void b43legacy_print_driverinfo(void) 3963static void b43legacy_print_driverinfo(void)
3962{ 3964{
3963 const char *feat_pci = "", *feat_leds = "", *feat_rfkill = "", 3965 const char *feat_pci = "", *feat_leds = "",
3964 *feat_pio = "", *feat_dma = ""; 3966 *feat_pio = "", *feat_dma = "";
3965 3967
3966#ifdef CONFIG_B43LEGACY_PCI_AUTOSELECT 3968#ifdef CONFIG_B43LEGACY_PCI_AUTOSELECT
@@ -3969,9 +3971,6 @@ static void b43legacy_print_driverinfo(void)
3969#ifdef CONFIG_B43LEGACY_LEDS 3971#ifdef CONFIG_B43LEGACY_LEDS
3970 feat_leds = "L"; 3972 feat_leds = "L";
3971#endif 3973#endif
3972#ifdef CONFIG_B43LEGACY_RFKILL
3973 feat_rfkill = "R";
3974#endif
3975#ifdef CONFIG_B43LEGACY_PIO 3974#ifdef CONFIG_B43LEGACY_PIO
3976 feat_pio = "I"; 3975 feat_pio = "I";
3977#endif 3976#endif
@@ -3979,9 +3978,9 @@ static void b43legacy_print_driverinfo(void)
3979 feat_dma = "D"; 3978 feat_dma = "D";
3980#endif 3979#endif
3981 printk(KERN_INFO "Broadcom 43xx-legacy driver loaded " 3980 printk(KERN_INFO "Broadcom 43xx-legacy driver loaded "
3982 "[ Features: %s%s%s%s%s, Firmware-ID: " 3981 "[ Features: %s%s%s%s, Firmware-ID: "
3983 B43legacy_SUPPORTED_FIRMWARE_ID " ]\n", 3982 B43legacy_SUPPORTED_FIRMWARE_ID " ]\n",
3984 feat_pci, feat_leds, feat_rfkill, feat_pio, feat_dma); 3983 feat_pci, feat_leds, feat_pio, feat_dma);
3985} 3984}
3986 3985
3987static int __init b43legacy_init(void) 3986static int __init b43legacy_init(void)
diff --git a/drivers/net/wireless/hostap/hostap_hw.c b/drivers/net/wireless/hostap/hostap_hw.c
index ff9b5c88218..d7073281942 100644
--- a/drivers/net/wireless/hostap/hostap_hw.c
+++ b/drivers/net/wireless/hostap/hostap_hw.c
@@ -2618,6 +2618,15 @@ static irqreturn_t prism2_interrupt(int irq, void *dev_id)
2618 int events = 0; 2618 int events = 0;
2619 u16 ev; 2619 u16 ev;
2620 2620
 2621 	/* Detect early interrupt before driver is fully configured */
2622 if (!dev->base_addr) {
2623 if (net_ratelimit()) {
2624 printk(KERN_DEBUG "%s: Interrupt, but dev not configured\n",
2625 dev->name);
2626 }
2627 return IRQ_HANDLED;
2628 }
2629
2621 iface = netdev_priv(dev); 2630 iface = netdev_priv(dev);
2622 local = iface->local; 2631 local = iface->local;
2623 2632
diff --git a/drivers/net/wireless/hostap/hostap_pci.c b/drivers/net/wireless/hostap/hostap_pci.c
index 8fdd41f4b4f..4d97ae37499 100644
--- a/drivers/net/wireless/hostap/hostap_pci.c
+++ b/drivers/net/wireless/hostap/hostap_pci.c
@@ -39,7 +39,7 @@ struct hostap_pci_priv {
39/* FIX: do we need mb/wmb/rmb with memory operations? */ 39/* FIX: do we need mb/wmb/rmb with memory operations? */
40 40
41 41
42static struct pci_device_id prism2_pci_id_table[] __devinitdata = { 42static DEFINE_PCI_DEVICE_TABLE(prism2_pci_id_table) = {
43 /* Intersil Prism3 ISL3872 11Mb/s WLAN Controller */ 43 /* Intersil Prism3 ISL3872 11Mb/s WLAN Controller */
44 { 0x1260, 0x3872, PCI_ANY_ID, PCI_ANY_ID }, 44 { 0x1260, 0x3872, PCI_ANY_ID, PCI_ANY_ID },
45 /* Intersil Prism2.5 ISL3874 11Mb/s WLAN Controller */ 45 /* Intersil Prism2.5 ISL3874 11Mb/s WLAN Controller */
diff --git a/drivers/net/wireless/hostap/hostap_plx.c b/drivers/net/wireless/hostap/hostap_plx.c
index 0e5d51086a4..fc04ccdc5be 100644
--- a/drivers/net/wireless/hostap/hostap_plx.c
+++ b/drivers/net/wireless/hostap/hostap_plx.c
@@ -60,7 +60,7 @@ struct hostap_plx_priv {
60 60
61#define PLXDEV(vendor,dev,str) { vendor, dev, PCI_ANY_ID, PCI_ANY_ID } 61#define PLXDEV(vendor,dev,str) { vendor, dev, PCI_ANY_ID, PCI_ANY_ID }
62 62
63static struct pci_device_id prism2_plx_id_table[] __devinitdata = { 63static DEFINE_PCI_DEVICE_TABLE(prism2_plx_id_table) = {
64 PLXDEV(0x10b7, 0x7770, "3Com AirConnect PCI 777A"), 64 PLXDEV(0x10b7, 0x7770, "3Com AirConnect PCI 777A"),
65 PLXDEV(0x111a, 0x1023, "Siemens SpeedStream SS1023"), 65 PLXDEV(0x111a, 0x1023, "Siemens SpeedStream SS1023"),
66 PLXDEV(0x126c, 0x8030, "Nortel emobility"), 66 PLXDEV(0x126c, 0x8030, "Nortel emobility"),
diff --git a/drivers/net/wireless/ipw2x00/ipw2100.c b/drivers/net/wireless/ipw2x00/ipw2100.c
index 56afcf041f8..9b72c45a774 100644
--- a/drivers/net/wireless/ipw2x00/ipw2100.c
+++ b/drivers/net/wireless/ipw2x00/ipw2100.c
@@ -6585,7 +6585,7 @@ static void ipw2100_shutdown(struct pci_dev *pci_dev)
6585 6585
6586#define IPW2100_DEV_ID(x) { PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, x } 6586#define IPW2100_DEV_ID(x) { PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, x }
6587 6587
6588static struct pci_device_id ipw2100_pci_id_table[] __devinitdata = { 6588static DEFINE_PCI_DEVICE_TABLE(ipw2100_pci_id_table) = {
6589 IPW2100_DEV_ID(0x2520), /* IN 2100A mPCI 3A */ 6589 IPW2100_DEV_ID(0x2520), /* IN 2100A mPCI 3A */
6590 IPW2100_DEV_ID(0x2521), /* IN 2100A mPCI 3B */ 6590 IPW2100_DEV_ID(0x2521), /* IN 2100A mPCI 3B */
6591 IPW2100_DEV_ID(0x2524), /* IN 2100A mPCI 3B */ 6591 IPW2100_DEV_ID(0x2524), /* IN 2100A mPCI 3B */
diff --git a/drivers/net/wireless/ipw2x00/ipw2200.c b/drivers/net/wireless/ipw2x00/ipw2200.c
index 09ddd3e6bed..63c2a7ade5f 100644
--- a/drivers/net/wireless/ipw2x00/ipw2200.c
+++ b/drivers/net/wireless/ipw2x00/ipw2200.c
@@ -11524,7 +11524,7 @@ out:
11524} 11524}
11525 11525
11526/* PCI driver stuff */ 11526/* PCI driver stuff */
11527static struct pci_device_id card_ids[] = { 11527static DEFINE_PCI_DEVICE_TABLE(card_ids) = {
11528 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2701, 0, 0, 0}, 11528 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2701, 0, 0, 0},
11529 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2702, 0, 0, 0}, 11529 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2702, 0, 0, 0},
11530 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2711, 0, 0, 0}, 11530 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2711, 0, 0, 0},
diff --git a/drivers/net/wireless/iwlwifi/iwl-1000.c b/drivers/net/wireless/iwlwifi/iwl-1000.c
index 8414178bcff..0db1fda94a6 100644
--- a/drivers/net/wireless/iwlwifi/iwl-1000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-1000.c
@@ -105,6 +105,7 @@ static struct iwl_lib_ops iwl1000_lib = {
105 .load_ucode = iwl5000_load_ucode, 105 .load_ucode = iwl5000_load_ucode,
106 .dump_nic_event_log = iwl_dump_nic_event_log, 106 .dump_nic_event_log = iwl_dump_nic_event_log,
107 .dump_nic_error_log = iwl_dump_nic_error_log, 107 .dump_nic_error_log = iwl_dump_nic_error_log,
108 .dump_csr = iwl_dump_csr,
108 .init_alive_start = iwl5000_init_alive_start, 109 .init_alive_start = iwl5000_init_alive_start,
109 .alive_notify = iwl5000_alive_notify, 110 .alive_notify = iwl5000_alive_notify,
110 .send_tx_power = iwl5000_send_tx_power, 111 .send_tx_power = iwl5000_send_tx_power,
@@ -140,7 +141,7 @@ static struct iwl_lib_ops iwl1000_lib = {
140 }, 141 },
141}; 142};
142 143
143static struct iwl_ops iwl1000_ops = { 144static const struct iwl_ops iwl1000_ops = {
144 .ucode = &iwl5000_ucode, 145 .ucode = &iwl5000_ucode,
145 .lib = &iwl1000_lib, 146 .lib = &iwl1000_lib,
146 .hcmd = &iwl5000_hcmd, 147 .hcmd = &iwl5000_hcmd,
@@ -173,7 +174,6 @@ struct iwl_cfg iwl1000_bgn_cfg = {
173 .use_rts_for_ht = true, /* use rts/cts protection */ 174 .use_rts_for_ht = true, /* use rts/cts protection */
174 .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS, 175 .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
175 .support_ct_kill_exit = true, 176 .support_ct_kill_exit = true,
176 .sm_ps_mode = WLAN_HT_CAP_SM_PS_DISABLED,
177}; 177};
178 178
179struct iwl_cfg iwl1000_bg_cfg = { 179struct iwl_cfg iwl1000_bg_cfg = {
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945.c b/drivers/net/wireless/iwlwifi/iwl-3945.c
index 234891d8cc1..6cde661ce0b 100644
--- a/drivers/net/wireless/iwlwifi/iwl-3945.c
+++ b/drivers/net/wireless/iwlwifi/iwl-3945.c
@@ -2804,7 +2804,7 @@ static struct iwl_hcmd_utils_ops iwl3945_hcmd_utils = {
2804 .rts_tx_cmd_flag = iwlcore_rts_tx_cmd_flag, 2804 .rts_tx_cmd_flag = iwlcore_rts_tx_cmd_flag,
2805}; 2805};
2806 2806
2807static struct iwl_ops iwl3945_ops = { 2807static const struct iwl_ops iwl3945_ops = {
2808 .ucode = &iwl3945_ucode, 2808 .ucode = &iwl3945_ucode,
2809 .lib = &iwl3945_lib, 2809 .lib = &iwl3945_lib,
2810 .hcmd = &iwl3945_hcmd, 2810 .hcmd = &iwl3945_hcmd,
@@ -2849,7 +2849,7 @@ static struct iwl_cfg iwl3945_abg_cfg = {
2849 .broken_powersave = true, 2849 .broken_powersave = true,
2850}; 2850};
2851 2851
2852struct pci_device_id iwl3945_hw_card_ids[] = { 2852DEFINE_PCI_DEVICE_TABLE(iwl3945_hw_card_ids) = {
2853 {IWL_PCI_DEVICE(0x4222, 0x1005, iwl3945_bg_cfg)}, 2853 {IWL_PCI_DEVICE(0x4222, 0x1005, iwl3945_bg_cfg)},
2854 {IWL_PCI_DEVICE(0x4222, 0x1034, iwl3945_bg_cfg)}, 2854 {IWL_PCI_DEVICE(0x4222, 0x1034, iwl3945_bg_cfg)},
2855 {IWL_PCI_DEVICE(0x4222, 0x1044, iwl3945_bg_cfg)}, 2855 {IWL_PCI_DEVICE(0x4222, 0x1044, iwl3945_bg_cfg)},
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945.h b/drivers/net/wireless/iwlwifi/iwl-3945.h
index 531fa125f5a..bc532ff4f88 100644
--- a/drivers/net/wireless/iwlwifi/iwl-3945.h
+++ b/drivers/net/wireless/iwlwifi/iwl-3945.h
@@ -37,7 +37,7 @@
37#include <net/ieee80211_radiotap.h> 37#include <net/ieee80211_radiotap.h>
38 38
39/* Hardware specific file defines the PCI IDs table for that hardware module */ 39/* Hardware specific file defines the PCI IDs table for that hardware module */
40extern struct pci_device_id iwl3945_hw_card_ids[]; 40extern const struct pci_device_id iwl3945_hw_card_ids[];
41 41
42#include "iwl-csr.h" 42#include "iwl-csr.h"
43#include "iwl-prph.h" 43#include "iwl-prph.h"
@@ -226,7 +226,8 @@ extern void iwl3945_rx_replenish(void *data);
226extern void iwl3945_rx_queue_reset(struct iwl_priv *priv, struct iwl_rx_queue *rxq); 226extern void iwl3945_rx_queue_reset(struct iwl_priv *priv, struct iwl_rx_queue *rxq);
227extern unsigned int iwl3945_fill_beacon_frame(struct iwl_priv *priv, 227extern unsigned int iwl3945_fill_beacon_frame(struct iwl_priv *priv,
228 struct ieee80211_hdr *hdr,int left); 228 struct ieee80211_hdr *hdr,int left);
229extern void iwl3945_dump_nic_event_log(struct iwl_priv *priv, bool full_log); 229extern int iwl3945_dump_nic_event_log(struct iwl_priv *priv, bool full_log,
230 char **buf, bool display);
230extern void iwl3945_dump_nic_error_log(struct iwl_priv *priv); 231extern void iwl3945_dump_nic_error_log(struct iwl_priv *priv);
231 232
232/* 233/*
diff --git a/drivers/net/wireless/iwlwifi/iwl-4965.c b/drivers/net/wireless/iwlwifi/iwl-4965.c
index 9b4b8b5c757..6a004abb597 100644
--- a/drivers/net/wireless/iwlwifi/iwl-4965.c
+++ b/drivers/net/wireless/iwlwifi/iwl-4965.c
@@ -2208,7 +2208,7 @@ static struct iwl_lib_ops iwl4965_lib = {
2208 }, 2208 },
2209}; 2209};
2210 2210
2211static struct iwl_ops iwl4965_ops = { 2211static const struct iwl_ops iwl4965_ops = {
2212 .ucode = &iwl4965_ucode, 2212 .ucode = &iwl4965_ucode,
2213 .lib = &iwl4965_lib, 2213 .lib = &iwl4965_lib,
2214 .hcmd = &iwl4965_hcmd, 2214 .hcmd = &iwl4965_hcmd,
@@ -2239,7 +2239,6 @@ struct iwl_cfg iwl4965_agn_cfg = {
2239 .broken_powersave = true, 2239 .broken_powersave = true,
2240 .led_compensation = 61, 2240 .led_compensation = 61,
2241 .chain_noise_num_beacons = IWL4965_CAL_NUM_BEACONS, 2241 .chain_noise_num_beacons = IWL4965_CAL_NUM_BEACONS,
2242 .sm_ps_mode = WLAN_HT_CAP_SM_PS_DISABLED,
2243}; 2242};
2244 2243
2245/* Module firmware */ 2244/* Module firmware */
diff --git a/drivers/net/wireless/iwlwifi/iwl-5000.c b/drivers/net/wireless/iwlwifi/iwl-5000.c
index de45f308b74..c6120f0b8f9 100644
--- a/drivers/net/wireless/iwlwifi/iwl-5000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-5000.c
@@ -781,7 +781,7 @@ void iwl5000_txq_update_byte_cnt_tbl(struct iwl_priv *priv,
781 781
782 scd_bc_tbl[txq_id].tfd_offset[write_ptr] = bc_ent; 782 scd_bc_tbl[txq_id].tfd_offset[write_ptr] = bc_ent;
783 783
784 if (txq->q.write_ptr < TFD_QUEUE_SIZE_BC_DUP) 784 if (write_ptr < TFD_QUEUE_SIZE_BC_DUP)
785 scd_bc_tbl[txq_id]. 785 scd_bc_tbl[txq_id].
786 tfd_offset[TFD_QUEUE_SIZE_MAX + write_ptr] = bc_ent; 786 tfd_offset[TFD_QUEUE_SIZE_MAX + write_ptr] = bc_ent;
787} 787}
@@ -800,12 +800,12 @@ void iwl5000_txq_inval_byte_cnt_tbl(struct iwl_priv *priv,
800 if (txq_id != IWL_CMD_QUEUE_NUM) 800 if (txq_id != IWL_CMD_QUEUE_NUM)
801 sta_id = txq->cmd[read_ptr]->cmd.tx.sta_id; 801 sta_id = txq->cmd[read_ptr]->cmd.tx.sta_id;
802 802
803 bc_ent = cpu_to_le16(1 | (sta_id << 12)); 803 bc_ent = cpu_to_le16(1 | (sta_id << 12));
804 scd_bc_tbl[txq_id].tfd_offset[read_ptr] = bc_ent; 804 scd_bc_tbl[txq_id].tfd_offset[read_ptr] = bc_ent;
805 805
806 if (txq->q.write_ptr < TFD_QUEUE_SIZE_BC_DUP) 806 if (read_ptr < TFD_QUEUE_SIZE_BC_DUP)
807 scd_bc_tbl[txq_id]. 807 scd_bc_tbl[txq_id].
808 tfd_offset[TFD_QUEUE_SIZE_MAX + read_ptr] = bc_ent; 808 tfd_offset[TFD_QUEUE_SIZE_MAX + read_ptr] = bc_ent;
809} 809}
810 810
811static int iwl5000_tx_queue_set_q2ratid(struct iwl_priv *priv, u16 ra_tid, 811static int iwl5000_tx_queue_set_q2ratid(struct iwl_priv *priv, u16 ra_tid,
@@ -1466,6 +1466,7 @@ struct iwl_lib_ops iwl5000_lib = {
1466 .is_valid_rtc_data_addr = iwl5000_hw_valid_rtc_data_addr, 1466 .is_valid_rtc_data_addr = iwl5000_hw_valid_rtc_data_addr,
1467 .dump_nic_event_log = iwl_dump_nic_event_log, 1467 .dump_nic_event_log = iwl_dump_nic_event_log,
1468 .dump_nic_error_log = iwl_dump_nic_error_log, 1468 .dump_nic_error_log = iwl_dump_nic_error_log,
1469 .dump_csr = iwl_dump_csr,
1469 .load_ucode = iwl5000_load_ucode, 1470 .load_ucode = iwl5000_load_ucode,
1470 .init_alive_start = iwl5000_init_alive_start, 1471 .init_alive_start = iwl5000_init_alive_start,
1471 .alive_notify = iwl5000_alive_notify, 1472 .alive_notify = iwl5000_alive_notify,
@@ -1518,6 +1519,7 @@ static struct iwl_lib_ops iwl5150_lib = {
1518 .is_valid_rtc_data_addr = iwl5000_hw_valid_rtc_data_addr, 1519 .is_valid_rtc_data_addr = iwl5000_hw_valid_rtc_data_addr,
1519 .dump_nic_event_log = iwl_dump_nic_event_log, 1520 .dump_nic_event_log = iwl_dump_nic_event_log,
1520 .dump_nic_error_log = iwl_dump_nic_error_log, 1521 .dump_nic_error_log = iwl_dump_nic_error_log,
1522 .dump_csr = iwl_dump_csr,
1521 .load_ucode = iwl5000_load_ucode, 1523 .load_ucode = iwl5000_load_ucode,
1522 .init_alive_start = iwl5000_init_alive_start, 1524 .init_alive_start = iwl5000_init_alive_start,
1523 .alive_notify = iwl5000_alive_notify, 1525 .alive_notify = iwl5000_alive_notify,
@@ -1555,7 +1557,7 @@ static struct iwl_lib_ops iwl5150_lib = {
1555 }, 1557 },
1556}; 1558};
1557 1559
1558static struct iwl_ops iwl5000_ops = { 1560static const struct iwl_ops iwl5000_ops = {
1559 .ucode = &iwl5000_ucode, 1561 .ucode = &iwl5000_ucode,
1560 .lib = &iwl5000_lib, 1562 .lib = &iwl5000_lib,
1561 .hcmd = &iwl5000_hcmd, 1563 .hcmd = &iwl5000_hcmd,
@@ -1563,7 +1565,7 @@ static struct iwl_ops iwl5000_ops = {
1563 .led = &iwlagn_led_ops, 1565 .led = &iwlagn_led_ops,
1564}; 1566};
1565 1567
1566static struct iwl_ops iwl5150_ops = { 1568static const struct iwl_ops iwl5150_ops = {
1567 .ucode = &iwl5000_ucode, 1569 .ucode = &iwl5000_ucode,
1568 .lib = &iwl5150_lib, 1570 .lib = &iwl5150_lib,
1569 .hcmd = &iwl5000_hcmd, 1571 .hcmd = &iwl5000_hcmd,
@@ -1600,7 +1602,6 @@ struct iwl_cfg iwl5300_agn_cfg = {
1600 .led_compensation = 51, 1602 .led_compensation = 51,
1601 .use_rts_for_ht = true, /* use rts/cts protection */ 1603 .use_rts_for_ht = true, /* use rts/cts protection */
1602 .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS, 1604 .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
1603 .sm_ps_mode = WLAN_HT_CAP_SM_PS_DISABLED,
1604}; 1605};
1605 1606
1606struct iwl_cfg iwl5100_bgn_cfg = { 1607struct iwl_cfg iwl5100_bgn_cfg = {
@@ -1671,7 +1672,6 @@ struct iwl_cfg iwl5100_agn_cfg = {
1671 .led_compensation = 51, 1672 .led_compensation = 51,
1672 .use_rts_for_ht = true, /* use rts/cts protection */ 1673 .use_rts_for_ht = true, /* use rts/cts protection */
1673 .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS, 1674 .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
1674 .sm_ps_mode = WLAN_HT_CAP_SM_PS_DISABLED,
1675}; 1675};
1676 1676
1677struct iwl_cfg iwl5350_agn_cfg = { 1677struct iwl_cfg iwl5350_agn_cfg = {
@@ -1696,7 +1696,6 @@ struct iwl_cfg iwl5350_agn_cfg = {
1696 .led_compensation = 51, 1696 .led_compensation = 51,
1697 .use_rts_for_ht = true, /* use rts/cts protection */ 1697 .use_rts_for_ht = true, /* use rts/cts protection */
1698 .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS, 1698 .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
1699 .sm_ps_mode = WLAN_HT_CAP_SM_PS_DISABLED,
1700}; 1699};
1701 1700
1702struct iwl_cfg iwl5150_agn_cfg = { 1701struct iwl_cfg iwl5150_agn_cfg = {
@@ -1721,7 +1720,6 @@ struct iwl_cfg iwl5150_agn_cfg = {
1721 .led_compensation = 51, 1720 .led_compensation = 51,
1722 .use_rts_for_ht = true, /* use rts/cts protection */ 1721 .use_rts_for_ht = true, /* use rts/cts protection */
1723 .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS, 1722 .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
1724 .sm_ps_mode = WLAN_HT_CAP_SM_PS_DISABLED,
1725}; 1723};
1726 1724
1727struct iwl_cfg iwl5150_abg_cfg = { 1725struct iwl_cfg iwl5150_abg_cfg = {
diff --git a/drivers/net/wireless/iwlwifi/iwl-6000.c b/drivers/net/wireless/iwlwifi/iwl-6000.c
index 74e57104927..a5a0ed4817a 100644
--- a/drivers/net/wireless/iwlwifi/iwl-6000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-6000.c
@@ -215,6 +215,7 @@ static struct iwl_lib_ops iwl6000_lib = {
215 .load_ucode = iwl5000_load_ucode, 215 .load_ucode = iwl5000_load_ucode,
216 .dump_nic_event_log = iwl_dump_nic_event_log, 216 .dump_nic_event_log = iwl_dump_nic_event_log,
217 .dump_nic_error_log = iwl_dump_nic_error_log, 217 .dump_nic_error_log = iwl_dump_nic_error_log,
218 .dump_csr = iwl_dump_csr,
218 .init_alive_start = iwl5000_init_alive_start, 219 .init_alive_start = iwl5000_init_alive_start,
219 .alive_notify = iwl5000_alive_notify, 220 .alive_notify = iwl5000_alive_notify,
220 .send_tx_power = iwl5000_send_tx_power, 221 .send_tx_power = iwl5000_send_tx_power,
@@ -252,7 +253,7 @@ static struct iwl_lib_ops iwl6000_lib = {
252 }, 253 },
253}; 254};
254 255
255static struct iwl_ops iwl6000_ops = { 256static const struct iwl_ops iwl6000_ops = {
256 .ucode = &iwl5000_ucode, 257 .ucode = &iwl5000_ucode,
257 .lib = &iwl6000_lib, 258 .lib = &iwl6000_lib,
258 .hcmd = &iwl5000_hcmd, 259 .hcmd = &iwl5000_hcmd,
@@ -267,7 +268,7 @@ static struct iwl_hcmd_utils_ops iwl6050_hcmd_utils = {
267 .calc_rssi = iwl5000_calc_rssi, 268 .calc_rssi = iwl5000_calc_rssi,
268}; 269};
269 270
270static struct iwl_ops iwl6050_ops = { 271static const struct iwl_ops iwl6050_ops = {
271 .ucode = &iwl5000_ucode, 272 .ucode = &iwl5000_ucode,
272 .lib = &iwl6000_lib, 273 .lib = &iwl6000_lib,
273 .hcmd = &iwl5000_hcmd, 274 .hcmd = &iwl5000_hcmd,
@@ -306,7 +307,6 @@ struct iwl_cfg iwl6000i_2agn_cfg = {
306 .supports_idle = true, 307 .supports_idle = true,
307 .adv_thermal_throttle = true, 308 .adv_thermal_throttle = true,
308 .support_ct_kill_exit = true, 309 .support_ct_kill_exit = true,
309 .sm_ps_mode = WLAN_HT_CAP_SM_PS_DISABLED,
310}; 310};
311 311
312struct iwl_cfg iwl6000i_2abg_cfg = { 312struct iwl_cfg iwl6000i_2abg_cfg = {
@@ -395,7 +395,6 @@ struct iwl_cfg iwl6050_2agn_cfg = {
395 .supports_idle = true, 395 .supports_idle = true,
396 .adv_thermal_throttle = true, 396 .adv_thermal_throttle = true,
397 .support_ct_kill_exit = true, 397 .support_ct_kill_exit = true,
398 .sm_ps_mode = WLAN_HT_CAP_SM_PS_DYNAMIC,
399}; 398};
400 399
401struct iwl_cfg iwl6050_2abg_cfg = { 400struct iwl_cfg iwl6050_2abg_cfg = {
@@ -455,7 +454,6 @@ struct iwl_cfg iwl6000_3agn_cfg = {
455 .supports_idle = true, 454 .supports_idle = true,
456 .adv_thermal_throttle = true, 455 .adv_thermal_throttle = true,
457 .support_ct_kill_exit = true, 456 .support_ct_kill_exit = true,
458 .sm_ps_mode = WLAN_HT_CAP_SM_PS_DISABLED,
459}; 457};
460 458
461MODULE_FIRMWARE(IWL6000_MODULE_FIRMWARE(IWL6000_UCODE_API_MAX)); 459MODULE_FIRMWARE(IWL6000_MODULE_FIRMWARE(IWL6000_UCODE_API_MAX));
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn.c b/drivers/net/wireless/iwlwifi/iwl-agn.c
index 1c9866daf81..344e99de4ca 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn.c
@@ -657,6 +657,131 @@ static void iwl_bg_statistics_periodic(unsigned long data)
657 iwl_send_statistics_request(priv, CMD_ASYNC, false); 657 iwl_send_statistics_request(priv, CMD_ASYNC, false);
658} 658}
659 659
660
661static void iwl_print_cont_event_trace(struct iwl_priv *priv, u32 base,
662 u32 start_idx, u32 num_events,
663 u32 mode)
664{
665 u32 i;
666 u32 ptr; /* SRAM byte address of log data */
667 u32 ev, time, data; /* event log data */
668 unsigned long reg_flags;
669
670 if (mode == 0)
671 ptr = base + (4 * sizeof(u32)) + (start_idx * 2 * sizeof(u32));
672 else
673 ptr = base + (4 * sizeof(u32)) + (start_idx * 3 * sizeof(u32));
674
675 /* Make sure device is powered up for SRAM reads */
676 spin_lock_irqsave(&priv->reg_lock, reg_flags);
677 if (iwl_grab_nic_access(priv)) {
678 spin_unlock_irqrestore(&priv->reg_lock, reg_flags);
679 return;
680 }
681
682 /* Set starting address; reads will auto-increment */
683 _iwl_write_direct32(priv, HBUS_TARG_MEM_RADDR, ptr);
684 rmb();
685
686 /*
687 * "time" is actually "data" for mode 0 (no timestamp).
688 * place event id # at far right for easier visual parsing.
689 */
690 for (i = 0; i < num_events; i++) {
691 ev = _iwl_read_direct32(priv, HBUS_TARG_MEM_RDAT);
692 time = _iwl_read_direct32(priv, HBUS_TARG_MEM_RDAT);
693 if (mode == 0) {
694 trace_iwlwifi_dev_ucode_cont_event(priv,
695 0, time, ev);
696 } else {
697 data = _iwl_read_direct32(priv, HBUS_TARG_MEM_RDAT);
698 trace_iwlwifi_dev_ucode_cont_event(priv,
699 time, data, ev);
700 }
701 }
702 /* Allow device to power down */
703 iwl_release_nic_access(priv);
704 spin_unlock_irqrestore(&priv->reg_lock, reg_flags);
705}
706
707void iwl_continuous_event_trace(struct iwl_priv *priv)
708{
709 u32 capacity; /* event log capacity in # entries */
710 u32 base; /* SRAM byte address of event log header */
711 u32 mode; /* 0 - no timestamp, 1 - timestamp recorded */
712 u32 num_wraps; /* # times uCode wrapped to top of log */
713 u32 next_entry; /* index of next entry to be written by uCode */
714
715 if (priv->ucode_type == UCODE_INIT)
716 base = le32_to_cpu(priv->card_alive_init.error_event_table_ptr);
717 else
718 base = le32_to_cpu(priv->card_alive.log_event_table_ptr);
719 if (priv->cfg->ops->lib->is_valid_rtc_data_addr(base)) {
720 capacity = iwl_read_targ_mem(priv, base);
721 num_wraps = iwl_read_targ_mem(priv, base + (2 * sizeof(u32)));
722 mode = iwl_read_targ_mem(priv, base + (1 * sizeof(u32)));
723 next_entry = iwl_read_targ_mem(priv, base + (3 * sizeof(u32)));
724 } else
725 return;
726
727 if (num_wraps == priv->event_log.num_wraps) {
728 iwl_print_cont_event_trace(priv,
729 base, priv->event_log.next_entry,
730 next_entry - priv->event_log.next_entry,
731 mode);
732 priv->event_log.non_wraps_count++;
733 } else {
734 if ((num_wraps - priv->event_log.num_wraps) > 1)
735 priv->event_log.wraps_more_count++;
736 else
737 priv->event_log.wraps_once_count++;
738 trace_iwlwifi_dev_ucode_wrap_event(priv,
739 num_wraps - priv->event_log.num_wraps,
740 next_entry, priv->event_log.next_entry);
741 if (next_entry < priv->event_log.next_entry) {
742 iwl_print_cont_event_trace(priv, base,
743 priv->event_log.next_entry,
744 capacity - priv->event_log.next_entry,
745 mode);
746
747 iwl_print_cont_event_trace(priv, base, 0,
748 next_entry, mode);
749 } else {
750 iwl_print_cont_event_trace(priv, base,
751 next_entry, capacity - next_entry,
752 mode);
753
754 iwl_print_cont_event_trace(priv, base, 0,
755 next_entry, mode);
756 }
757 }
758 priv->event_log.num_wraps = num_wraps;
759 priv->event_log.next_entry = next_entry;
760}
761
762/**
763 * iwl_bg_ucode_trace - Timer callback to log ucode event
764 *
765 * The timer is continually set to execute every
766 * UCODE_TRACE_PERIOD milliseconds after the last timer expired
767 * this function is to perform continuous uCode event logging operation
768 * if enabled
769 */
770static void iwl_bg_ucode_trace(unsigned long data)
771{
772 struct iwl_priv *priv = (struct iwl_priv *)data;
773
774 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
775 return;
776
777 if (priv->event_log.ucode_trace) {
778 iwl_continuous_event_trace(priv);
779 /* Reschedule the timer to occur in UCODE_TRACE_PERIOD */
780 mod_timer(&priv->ucode_trace,
781 jiffies + msecs_to_jiffies(UCODE_TRACE_PERIOD));
782 }
783}
784
660static void iwl_rx_beacon_notif(struct iwl_priv *priv, 785static void iwl_rx_beacon_notif(struct iwl_priv *priv,
661 struct iwl_rx_mem_buffer *rxb) 786 struct iwl_rx_mem_buffer *rxb)
662{ 787{
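The continuous event trace added in the hunk above has to cope with the uCode write pointer wrapping around the circular log between timer ticks. As a minimal, self-contained sketch of that bookkeeping (plain userspace C, not the driver's code; read_range() stands in for the auto-incrementing SRAM reads done through HBUS_TARG_MEM_RADDR/RDAT):

/*
 * Sketch only: pick up the entries written to a circular event log since
 * the last poll.  A wrapped poll splits into two contiguous reads.
 */
#include <stdio.h>

static void read_range(const unsigned int *log, unsigned int start,
		       unsigned int count)
{
	for (unsigned int i = 0; i < count; i++)
		printf("event[%u] = 0x%08x\n", start + i, log[start + i]);
}

static void poll_new_events(const unsigned int *log, unsigned int capacity,
			    unsigned int last_idx, unsigned int next_idx,
			    int wrapped)
{
	if (!wrapped) {
		/* no wrap since the last poll: one contiguous range */
		read_range(log, last_idx, next_idx - last_idx);
	} else {
		/* wrapped once: tail of the buffer, then the head */
		read_range(log, last_idx, capacity - last_idx);
		read_range(log, 0, next_idx);
	}
}

int main(void)
{
	unsigned int log[8] = { 0x10, 0x11, 0x12, 0x13,
				0x14, 0x15, 0x16, 0x17 };

	poll_new_events(log, 8, 5, 2, 1);	/* writer wrapped past the end */
	return 0;
}

The unread region after a wrap is two contiguous ranges, which is why the wrapped branch of iwl_continuous_event_trace() issues two iwl_print_cont_event_trace() calls.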
@@ -689,12 +814,14 @@ static void iwl_rx_card_state_notif(struct iwl_priv *priv,
689 u32 flags = le32_to_cpu(pkt->u.card_state_notif.flags); 814 u32 flags = le32_to_cpu(pkt->u.card_state_notif.flags);
690 unsigned long status = priv->status; 815 unsigned long status = priv->status;
691 816
692 IWL_DEBUG_RF_KILL(priv, "Card state received: HW:%s SW:%s\n", 817 IWL_DEBUG_RF_KILL(priv, "Card state received: HW:%s SW:%s CT:%s\n",
693 (flags & HW_CARD_DISABLED) ? "Kill" : "On", 818 (flags & HW_CARD_DISABLED) ? "Kill" : "On",
694 (flags & SW_CARD_DISABLED) ? "Kill" : "On"); 819 (flags & SW_CARD_DISABLED) ? "Kill" : "On",
820 (flags & CT_CARD_DISABLED) ?
821 "Reached" : "Not reached");
695 822
696 if (flags & (SW_CARD_DISABLED | HW_CARD_DISABLED | 823 if (flags & (SW_CARD_DISABLED | HW_CARD_DISABLED |
697 RF_CARD_DISABLED)) { 824 CT_CARD_DISABLED)) {
698 825
699 iwl_write32(priv, CSR_UCODE_DRV_GP1_SET, 826 iwl_write32(priv, CSR_UCODE_DRV_GP1_SET,
700 CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED); 827 CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
@@ -708,10 +835,10 @@ static void iwl_rx_card_state_notif(struct iwl_priv *priv,
708 iwl_write_direct32(priv, HBUS_TARG_MBX_C, 835 iwl_write_direct32(priv, HBUS_TARG_MBX_C,
709 HBUS_TARG_MBX_C_REG_BIT_CMD_BLOCKED); 836 HBUS_TARG_MBX_C_REG_BIT_CMD_BLOCKED);
710 } 837 }
711 if (flags & RF_CARD_DISABLED) 838 if (flags & CT_CARD_DISABLED)
712 iwl_tt_enter_ct_kill(priv); 839 iwl_tt_enter_ct_kill(priv);
713 } 840 }
714 if (!(flags & RF_CARD_DISABLED)) 841 if (!(flags & CT_CARD_DISABLED))
715 iwl_tt_exit_ct_kill(priv); 842 iwl_tt_exit_ct_kill(priv);
716 843
717 if (flags & HW_CARD_DISABLED) 844 if (flags & HW_CARD_DISABLED)
@@ -1705,8 +1832,9 @@ void iwl_dump_nic_error_log(struct iwl_priv *priv)
1705 * iwl_print_event_log - Dump error event log to syslog 1832 * iwl_print_event_log - Dump error event log to syslog
1706 * 1833 *
1707 */ 1834 */
1708static void iwl_print_event_log(struct iwl_priv *priv, u32 start_idx, 1835static int iwl_print_event_log(struct iwl_priv *priv, u32 start_idx,
1709 u32 num_events, u32 mode) 1836 u32 num_events, u32 mode,
1837 int pos, char **buf, size_t bufsz)
1710{ 1838{
1711 u32 i; 1839 u32 i;
1712 u32 base; /* SRAM byte address of event log header */ 1840 u32 base; /* SRAM byte address of event log header */
@@ -1716,7 +1844,7 @@ static void iwl_print_event_log(struct iwl_priv *priv, u32 start_idx,
1716 unsigned long reg_flags; 1844 unsigned long reg_flags;
1717 1845
1718 if (num_events == 0) 1846 if (num_events == 0)
1719 return; 1847 return pos;
1720 if (priv->ucode_type == UCODE_INIT) 1848 if (priv->ucode_type == UCODE_INIT)
1721 base = le32_to_cpu(priv->card_alive_init.log_event_table_ptr); 1849 base = le32_to_cpu(priv->card_alive_init.log_event_table_ptr);
1722 else 1850 else
@@ -1744,27 +1872,44 @@ static void iwl_print_event_log(struct iwl_priv *priv, u32 start_idx,
1744 time = _iwl_read_direct32(priv, HBUS_TARG_MEM_RDAT); 1872 time = _iwl_read_direct32(priv, HBUS_TARG_MEM_RDAT);
1745 if (mode == 0) { 1873 if (mode == 0) {
1746 /* data, ev */ 1874 /* data, ev */
1747 trace_iwlwifi_dev_ucode_event(priv, 0, time, ev); 1875 if (bufsz) {
1748 IWL_ERR(priv, "EVT_LOG:0x%08x:%04u\n", time, ev); 1876 pos += scnprintf(*buf + pos, bufsz - pos,
1877 "EVT_LOG:0x%08x:%04u\n",
1878 time, ev);
1879 } else {
1880 trace_iwlwifi_dev_ucode_event(priv, 0,
1881 time, ev);
1882 IWL_ERR(priv, "EVT_LOG:0x%08x:%04u\n",
1883 time, ev);
1884 }
1749 } else { 1885 } else {
1750 data = _iwl_read_direct32(priv, HBUS_TARG_MEM_RDAT); 1886 data = _iwl_read_direct32(priv, HBUS_TARG_MEM_RDAT);
1751 IWL_ERR(priv, "EVT_LOGT:%010u:0x%08x:%04u\n", 1887 if (bufsz) {
1888 pos += scnprintf(*buf + pos, bufsz - pos,
1889 "EVT_LOGT:%010u:0x%08x:%04u\n",
1890 time, data, ev);
1891 } else {
1892 IWL_ERR(priv, "EVT_LOGT:%010u:0x%08x:%04u\n",
1752 time, data, ev); 1893 time, data, ev);
1753 trace_iwlwifi_dev_ucode_event(priv, time, data, ev); 1894 trace_iwlwifi_dev_ucode_event(priv, time,
1895 data, ev);
1896 }
1754 } 1897 }
1755 } 1898 }
1756 1899
1757 /* Allow device to power down */ 1900 /* Allow device to power down */
1758 iwl_release_nic_access(priv); 1901 iwl_release_nic_access(priv);
1759 spin_unlock_irqrestore(&priv->reg_lock, reg_flags); 1902 spin_unlock_irqrestore(&priv->reg_lock, reg_flags);
1903 return pos;
1760} 1904}
1761 1905
1762/** 1906/**
1763 * iwl_print_last_event_logs - Dump the newest # of event log to syslog 1907 * iwl_print_last_event_logs - Dump the newest # of event log to syslog
1764 */ 1908 */
1765static void iwl_print_last_event_logs(struct iwl_priv *priv, u32 capacity, 1909static int iwl_print_last_event_logs(struct iwl_priv *priv, u32 capacity,
1766 u32 num_wraps, u32 next_entry, 1910 u32 num_wraps, u32 next_entry,
1767 u32 size, u32 mode) 1911 u32 size, u32 mode,
1912 int pos, char **buf, size_t bufsz)
1768{ 1913{
1769 /* 1914 /*
1770 * display the newest DEFAULT_LOG_ENTRIES entries 1915 * display the newest DEFAULT_LOG_ENTRIES entries
@@ -1772,21 +1917,26 @@ static void iwl_print_last_event_logs(struct iwl_priv *priv, u32 capacity,
1772 */ 1917 */
1773 if (num_wraps) { 1918 if (num_wraps) {
1774 if (next_entry < size) { 1919 if (next_entry < size) {
1775 iwl_print_event_log(priv, 1920 pos = iwl_print_event_log(priv,
1776 capacity - (size - next_entry), 1921 capacity - (size - next_entry),
1777 size - next_entry, mode); 1922 size - next_entry, mode,
1778 iwl_print_event_log(priv, 0, 1923 pos, buf, bufsz);
1779 next_entry, mode); 1924 pos = iwl_print_event_log(priv, 0,
1925 next_entry, mode,
1926 pos, buf, bufsz);
1780 } else 1927 } else
1781 iwl_print_event_log(priv, next_entry - size, 1928 pos = iwl_print_event_log(priv, next_entry - size,
1782 size, mode); 1929 size, mode, pos, buf, bufsz);
1783 } else { 1930 } else {
1784 if (next_entry < size) 1931 if (next_entry < size) {
1785 iwl_print_event_log(priv, 0, next_entry, mode); 1932 pos = iwl_print_event_log(priv, 0, next_entry,
1786 else 1933 mode, pos, buf, bufsz);
1787 iwl_print_event_log(priv, next_entry - size, 1934 } else {
1788 size, mode); 1935 pos = iwl_print_event_log(priv, next_entry - size,
1936 size, mode, pos, buf, bufsz);
1937 }
1789 } 1938 }
1939 return pos;
1790} 1940}
1791 1941
1792/* For sanity check only. Actual size is determined by uCode, typ. 512 */ 1942/* For sanity check only. Actual size is determined by uCode, typ. 512 */
@@ -1794,7 +1944,8 @@ static void iwl_print_last_event_logs(struct iwl_priv *priv, u32 capacity,
1794 1944
1795#define DEFAULT_DUMP_EVENT_LOG_ENTRIES (20) 1945#define DEFAULT_DUMP_EVENT_LOG_ENTRIES (20)
1796 1946
1797void iwl_dump_nic_event_log(struct iwl_priv *priv, bool full_log) 1947int iwl_dump_nic_event_log(struct iwl_priv *priv, bool full_log,
1948 char **buf, bool display)
1798{ 1949{
1799 u32 base; /* SRAM byte address of event log header */ 1950 u32 base; /* SRAM byte address of event log header */
1800 u32 capacity; /* event log capacity in # entries */ 1951 u32 capacity; /* event log capacity in # entries */
@@ -1802,6 +1953,8 @@ void iwl_dump_nic_event_log(struct iwl_priv *priv, bool full_log)
1802 u32 num_wraps; /* # times uCode wrapped to top of log */ 1953 u32 num_wraps; /* # times uCode wrapped to top of log */
1803 u32 next_entry; /* index of next entry to be written by uCode */ 1954 u32 next_entry; /* index of next entry to be written by uCode */
1804 u32 size; /* # entries that we'll print */ 1955 u32 size; /* # entries that we'll print */
1956 int pos = 0;
1957 size_t bufsz = 0;
1805 1958
1806 if (priv->ucode_type == UCODE_INIT) 1959 if (priv->ucode_type == UCODE_INIT)
1807 base = le32_to_cpu(priv->card_alive_init.log_event_table_ptr); 1960 base = le32_to_cpu(priv->card_alive_init.log_event_table_ptr);
@@ -1812,7 +1965,7 @@ void iwl_dump_nic_event_log(struct iwl_priv *priv, bool full_log)
1812 IWL_ERR(priv, 1965 IWL_ERR(priv,
1813 "Invalid event log pointer 0x%08X for %s uCode\n", 1966 "Invalid event log pointer 0x%08X for %s uCode\n",
1814 base, (priv->ucode_type == UCODE_INIT) ? "Init" : "RT"); 1967 base, (priv->ucode_type == UCODE_INIT) ? "Init" : "RT");
1815 return; 1968 return pos;
1816 } 1969 }
1817 1970
1818 /* event log header */ 1971 /* event log header */
@@ -1838,7 +1991,7 @@ void iwl_dump_nic_event_log(struct iwl_priv *priv, bool full_log)
1838 /* bail out if nothing in log */ 1991 /* bail out if nothing in log */
1839 if (size == 0) { 1992 if (size == 0) {
1840 IWL_ERR(priv, "Start IWL Event Log Dump: nothing in log\n"); 1993 IWL_ERR(priv, "Start IWL Event Log Dump: nothing in log\n");
1841 return; 1994 return pos;
1842 } 1995 }
1843 1996
1844#ifdef CONFIG_IWLWIFI_DEBUG 1997#ifdef CONFIG_IWLWIFI_DEBUG
@@ -1853,6 +2006,15 @@ void iwl_dump_nic_event_log(struct iwl_priv *priv, bool full_log)
1853 size); 2006 size);
1854 2007
1855#ifdef CONFIG_IWLWIFI_DEBUG 2008#ifdef CONFIG_IWLWIFI_DEBUG
2009 if (display) {
2010 if (full_log)
2011 bufsz = capacity * 48;
2012 else
2013 bufsz = size * 48;
2014 *buf = kmalloc(bufsz, GFP_KERNEL);
2015 if (!*buf)
2016 return pos;
2017 }
1856 if ((iwl_get_debug_level(priv) & IWL_DL_FW_ERRORS) || full_log) { 2018 if ((iwl_get_debug_level(priv) & IWL_DL_FW_ERRORS) || full_log) {
1857 /* 2019 /*
1858 * if uCode has wrapped back to top of log, 2020 * if uCode has wrapped back to top of log,
@@ -1860,17 +2022,22 @@ void iwl_dump_nic_event_log(struct iwl_priv *priv, bool full_log)
1860 * i.e the next one that uCode would fill. 2022 * i.e the next one that uCode would fill.
1861 */ 2023 */
1862 if (num_wraps) 2024 if (num_wraps)
1863 iwl_print_event_log(priv, next_entry, 2025 pos = iwl_print_event_log(priv, next_entry,
1864 capacity - next_entry, mode); 2026 capacity - next_entry, mode,
2027 pos, buf, bufsz);
1865 /* (then/else) start at top of log */ 2028 /* (then/else) start at top of log */
1866 iwl_print_event_log(priv, 0, next_entry, mode); 2029 pos = iwl_print_event_log(priv, 0,
2030 next_entry, mode, pos, buf, bufsz);
1867 } else 2031 } else
1868 iwl_print_last_event_logs(priv, capacity, num_wraps, 2032 pos = iwl_print_last_event_logs(priv, capacity, num_wraps,
1869 next_entry, size, mode); 2033 next_entry, size, mode,
2034 pos, buf, bufsz);
1870#else 2035#else
1871 iwl_print_last_event_logs(priv, capacity, num_wraps, 2036 pos = iwl_print_last_event_logs(priv, capacity, num_wraps,
1872 next_entry, size, mode); 2037 next_entry, size, mode,
2038 pos, buf, bufsz);
1873#endif 2039#endif
2040 return pos;
1874} 2041}
1875 2042
1876/** 2043/**
@@ -2456,6 +2623,10 @@ static int iwl_setup_mac(struct iwl_priv *priv)
2456 hw->flags |= IEEE80211_HW_SUPPORTS_PS | 2623 hw->flags |= IEEE80211_HW_SUPPORTS_PS |
2457 IEEE80211_HW_SUPPORTS_DYNAMIC_PS; 2624 IEEE80211_HW_SUPPORTS_DYNAMIC_PS;
2458 2625
2626 if (priv->cfg->sku & IWL_SKU_N)
2627 hw->flags |= IEEE80211_HW_SUPPORTS_DYNAMIC_SMPS |
2628 IEEE80211_HW_SUPPORTS_STATIC_SMPS;
2629
2459 hw->sta_data_size = sizeof(struct iwl_station_priv); 2630 hw->sta_data_size = sizeof(struct iwl_station_priv);
2460 hw->wiphy->interface_modes = 2631 hw->wiphy->interface_modes =
2461 BIT(NL80211_IFTYPE_STATION) | 2632 BIT(NL80211_IFTYPE_STATION) |
@@ -2784,6 +2955,9 @@ static int iwl_mac_ampdu_action(struct ieee80211_hw *hw,
2784 return 0; 2955 return 0;
2785 else 2956 else
2786 return ret; 2957 return ret;
2958 case IEEE80211_AMPDU_TX_OPERATIONAL:
2959 /* do nothing */
2960 return -EOPNOTSUPP;
2787 default: 2961 default:
2788 IWL_DEBUG_HT(priv, "unknown\n"); 2962 IWL_DEBUG_HT(priv, "unknown\n");
2789 return -EINVAL; 2963 return -EINVAL;
@@ -3126,6 +3300,10 @@ static void iwl_setup_deferred_work(struct iwl_priv *priv)
3126 priv->statistics_periodic.data = (unsigned long)priv; 3300 priv->statistics_periodic.data = (unsigned long)priv;
3127 priv->statistics_periodic.function = iwl_bg_statistics_periodic; 3301 priv->statistics_periodic.function = iwl_bg_statistics_periodic;
3128 3302
3303 init_timer(&priv->ucode_trace);
3304 priv->ucode_trace.data = (unsigned long)priv;
3305 priv->ucode_trace.function = iwl_bg_ucode_trace;
3306
3129 if (!priv->cfg->use_isr_legacy) 3307 if (!priv->cfg->use_isr_legacy)
3130 tasklet_init(&priv->irq_tasklet, (void (*)(unsigned long)) 3308 tasklet_init(&priv->irq_tasklet, (void (*)(unsigned long))
3131 iwl_irq_tasklet, (unsigned long)priv); 3309 iwl_irq_tasklet, (unsigned long)priv);
@@ -3144,6 +3322,7 @@ static void iwl_cancel_deferred_work(struct iwl_priv *priv)
3144 cancel_delayed_work(&priv->alive_start); 3322 cancel_delayed_work(&priv->alive_start);
3145 cancel_work_sync(&priv->beacon_update); 3323 cancel_work_sync(&priv->beacon_update);
3146 del_timer_sync(&priv->statistics_periodic); 3324 del_timer_sync(&priv->statistics_periodic);
3325 del_timer_sync(&priv->ucode_trace);
3147} 3326}
3148 3327
3149static void iwl_init_hw_rates(struct iwl_priv *priv, 3328static void iwl_init_hw_rates(struct iwl_priv *priv,
@@ -3188,6 +3367,7 @@ static int iwl_init_drv(struct iwl_priv *priv)
3188 priv->band = IEEE80211_BAND_2GHZ; 3367 priv->band = IEEE80211_BAND_2GHZ;
3189 3368
3190 priv->iw_mode = NL80211_IFTYPE_STATION; 3369 priv->iw_mode = NL80211_IFTYPE_STATION;
3370 priv->current_ht_config.smps = IEEE80211_SMPS_STATIC;
3191 3371
3192 /* Choose which receivers/antennas to use */ 3372 /* Choose which receivers/antennas to use */
3193 if (priv->cfg->ops->hcmd->set_rxon_chain) 3373 if (priv->cfg->ops->hcmd->set_rxon_chain)
@@ -3589,7 +3769,7 @@ static void __devexit iwl_pci_remove(struct pci_dev *pdev)
3589 *****************************************************************************/ 3769 *****************************************************************************/
3590 3770
3591/* Hardware specific file defines the PCI IDs table for that hardware module */ 3771/* Hardware specific file defines the PCI IDs table for that hardware module */
3592static struct pci_device_id iwl_hw_card_ids[] = { 3772static DEFINE_PCI_DEVICE_TABLE(iwl_hw_card_ids) = {
3593#ifdef CONFIG_IWL4965 3773#ifdef CONFIG_IWL4965
3594 {IWL_PCI_DEVICE(0x4229, PCI_ANY_ID, iwl4965_agn_cfg)}, 3774 {IWL_PCI_DEVICE(0x4229, PCI_ANY_ID, iwl4965_agn_cfg)},
3595 {IWL_PCI_DEVICE(0x4230, PCI_ANY_ID, iwl4965_agn_cfg)}, 3775 {IWL_PCI_DEVICE(0x4230, PCI_ANY_ID, iwl4965_agn_cfg)},
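The iwl-agn.c changes also rework iwl_dump_nic_event_log() so it returns a running position and can either fill a caller-supplied buffer (for the new debugfs read) or keep logging to syslog (for the error path). A rough userspace sketch of that dual-output pattern, not kernel code:

/*
 * Sketch of the accumulate-or-log pattern: append to a caller buffer and
 * return the running position, or print each line when no buffer is given.
 * The kernel side uses scnprintf(), which never advances past the buffer;
 * plain snprintf() is close enough here while the output fits.
 */
#include <stdio.h>

static int emit(int pos, char *buf, size_t bufsz, const char *line)
{
	if (bufsz)
		pos += snprintf(buf + pos, bufsz - pos, "%s\n", line);
	else
		fprintf(stderr, "%s\n", line);	/* syslog-style path */
	return pos;
}

int main(void)
{
	char buf[64];
	int pos = 0;

	pos = emit(pos, buf, sizeof(buf), "EVT_LOG:0x00000001:0001");
	pos = emit(pos, buf, sizeof(buf), "EVT_LOG:0x00000002:0002");
	printf("%d bytes buffered:\n%s", pos, buf);

	emit(0, NULL, 0, "EVT_LOG:0x00000003:0003");
	return 0;
}

Chaining the position this way is what lets iwl_print_event_log() and iwl_print_last_event_logs() be reused unchanged by both the error dump and the debugfs "log_event" read.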
diff --git a/drivers/net/wireless/iwlwifi/iwl-calib.c b/drivers/net/wireless/iwlwifi/iwl-calib.c
index 95a57b36a7e..dc61906290e 100644
--- a/drivers/net/wireless/iwlwifi/iwl-calib.c
+++ b/drivers/net/wireless/iwlwifi/iwl-calib.c
@@ -414,7 +414,6 @@ static int iwl_sens_auto_corr_ofdm(struct iwl_priv *priv,
414/* Prepare a SENSITIVITY_CMD, send to uCode if values have changed */ 414/* Prepare a SENSITIVITY_CMD, send to uCode if values have changed */
415static int iwl_sensitivity_write(struct iwl_priv *priv) 415static int iwl_sensitivity_write(struct iwl_priv *priv)
416{ 416{
417 int ret = 0;
418 struct iwl_sensitivity_cmd cmd ; 417 struct iwl_sensitivity_cmd cmd ;
419 struct iwl_sensitivity_data *data = NULL; 418 struct iwl_sensitivity_data *data = NULL;
420 struct iwl_host_cmd cmd_out = { 419 struct iwl_host_cmd cmd_out = {
@@ -477,11 +476,7 @@ static int iwl_sensitivity_write(struct iwl_priv *priv)
477 memcpy(&(priv->sensitivity_tbl[0]), &(cmd.table[0]), 476 memcpy(&(priv->sensitivity_tbl[0]), &(cmd.table[0]),
478 sizeof(u16)*HD_TABLE_SIZE); 477 sizeof(u16)*HD_TABLE_SIZE);
479 478
480 ret = iwl_send_cmd(priv, &cmd_out); 479 return iwl_send_cmd(priv, &cmd_out);
481 if (ret)
482 IWL_ERR(priv, "SENSITIVITY_CMD failed\n");
483
484 return ret;
485} 480}
486 481
487void iwl_init_sensitivity(struct iwl_priv *priv) 482void iwl_init_sensitivity(struct iwl_priv *priv)
diff --git a/drivers/net/wireless/iwlwifi/iwl-commands.h b/drivers/net/wireless/iwlwifi/iwl-commands.h
index e9150753192..3320cce3d57 100644
--- a/drivers/net/wireless/iwlwifi/iwl-commands.h
+++ b/drivers/net/wireless/iwlwifi/iwl-commands.h
@@ -120,7 +120,6 @@ enum {
120 CALIBRATION_COMPLETE_NOTIFICATION = 0x67, 120 CALIBRATION_COMPLETE_NOTIFICATION = 0x67,
121 121
122 /* 802.11h related */ 122 /* 802.11h related */
123 RADAR_NOTIFICATION = 0x70, /* not used */
124 REPLY_QUIET_CMD = 0x71, /* not used */ 123 REPLY_QUIET_CMD = 0x71, /* not used */
125 REPLY_CHANNEL_SWITCH = 0x72, 124 REPLY_CHANNEL_SWITCH = 0x72,
126 CHANNEL_SWITCH_NOTIFICATION = 0x73, 125 CHANNEL_SWITCH_NOTIFICATION = 0x73,
@@ -2510,7 +2509,7 @@ struct iwl_card_state_notif {
2510 2509
2511#define HW_CARD_DISABLED 0x01 2510#define HW_CARD_DISABLED 0x01
2512#define SW_CARD_DISABLED 0x02 2511#define SW_CARD_DISABLED 0x02
2513#define RF_CARD_DISABLED 0x04 2512#define CT_CARD_DISABLED 0x04
2514#define RXON_CARD_DISABLED 0x10 2513#define RXON_CARD_DISABLED 0x10
2515 2514
2516struct iwl_ct_kill_config { 2515struct iwl_ct_kill_config {
@@ -2984,7 +2983,7 @@ struct statistics_rx_ht_phy {
2984 __le32 agg_crc32_good; 2983 __le32 agg_crc32_good;
2985 __le32 agg_mpdu_cnt; 2984 __le32 agg_mpdu_cnt;
2986 __le32 agg_cnt; 2985 __le32 agg_cnt;
2987 __le32 reserved2; 2986 __le32 unsupport_mcs;
2988} __attribute__ ((packed)); 2987} __attribute__ ((packed));
2989 2988
2990#define INTERFERENCE_DATA_AVAILABLE cpu_to_le32(1) 2989#define INTERFERENCE_DATA_AVAILABLE cpu_to_le32(1)
@@ -3087,8 +3086,8 @@ struct statistics_div {
3087} __attribute__ ((packed)); 3086} __attribute__ ((packed));
3088 3087
3089struct statistics_general { 3088struct statistics_general {
3090 __le32 temperature; 3089 __le32 temperature; /* radio temperature */
3091 __le32 temperature_m; 3090 __le32 temperature_m; /* for 5000 and up, this is radio voltage */
3092 struct statistics_dbg dbg; 3091 struct statistics_dbg dbg;
3093 __le32 sleep_time; 3092 __le32 sleep_time;
3094 __le32 slots_out; 3093 __le32 slots_out;
diff --git a/drivers/net/wireless/iwlwifi/iwl-core.c b/drivers/net/wireless/iwlwifi/iwl-core.c
index 5461f105bd2..5b56307a381 100644
--- a/drivers/net/wireless/iwlwifi/iwl-core.c
+++ b/drivers/net/wireless/iwlwifi/iwl-core.c
@@ -450,8 +450,6 @@ static void iwlcore_init_ht_hw_capab(const struct iwl_priv *priv,
450 if (priv->cfg->ht_greenfield_support) 450 if (priv->cfg->ht_greenfield_support)
451 ht_info->cap |= IEEE80211_HT_CAP_GRN_FLD; 451 ht_info->cap |= IEEE80211_HT_CAP_GRN_FLD;
452 ht_info->cap |= IEEE80211_HT_CAP_SGI_20; 452 ht_info->cap |= IEEE80211_HT_CAP_SGI_20;
453 ht_info->cap |= (IEEE80211_HT_CAP_SM_PS &
454 (priv->cfg->sm_ps_mode << 2));
455 max_bit_rate = MAX_BIT_RATE_20_MHZ; 453 max_bit_rate = MAX_BIT_RATE_20_MHZ;
456 if (priv->hw_params.ht40_channel & BIT(band)) { 454 if (priv->hw_params.ht40_channel & BIT(band)) {
457 ht_info->cap |= IEEE80211_HT_CAP_SUP_WIDTH_20_40; 455 ht_info->cap |= IEEE80211_HT_CAP_SUP_WIDTH_20_40;
@@ -636,7 +634,7 @@ EXPORT_SYMBOL(iwlcore_rts_tx_cmd_flag);
636 634
637static bool is_single_rx_stream(struct iwl_priv *priv) 635static bool is_single_rx_stream(struct iwl_priv *priv)
638{ 636{
639 return !priv->current_ht_config.is_ht || 637 return priv->current_ht_config.smps == IEEE80211_SMPS_STATIC ||
640 priv->current_ht_config.single_chain_sufficient; 638 priv->current_ht_config.single_chain_sufficient;
641} 639}
642 640
@@ -1003,28 +1001,18 @@ static int iwl_get_active_rx_chain_count(struct iwl_priv *priv)
1003 */ 1001 */
1004static int iwl_get_idle_rx_chain_count(struct iwl_priv *priv, int active_cnt) 1002static int iwl_get_idle_rx_chain_count(struct iwl_priv *priv, int active_cnt)
1005{ 1003{
1006 int idle_cnt = active_cnt; 1004 /* # Rx chains when idling, depending on SMPS mode */
1007 bool is_cam = !test_bit(STATUS_POWER_PMI, &priv->status); 1005 switch (priv->current_ht_config.smps) {
1008 1006 case IEEE80211_SMPS_STATIC:
1009 /* # Rx chains when idling and maybe trying to save power */ 1007 case IEEE80211_SMPS_DYNAMIC:
1010 switch (priv->cfg->sm_ps_mode) { 1008 return IWL_NUM_IDLE_CHAINS_SINGLE;
1011 case WLAN_HT_CAP_SM_PS_STATIC: 1009 case IEEE80211_SMPS_OFF:
1012 idle_cnt = (is_cam) ? active_cnt : IWL_NUM_IDLE_CHAINS_SINGLE; 1010 return active_cnt;
1013 break;
1014 case WLAN_HT_CAP_SM_PS_DYNAMIC:
1015 idle_cnt = (is_cam) ? IWL_NUM_IDLE_CHAINS_DUAL :
1016 IWL_NUM_IDLE_CHAINS_SINGLE;
1017 break;
1018 case WLAN_HT_CAP_SM_PS_DISABLED:
1019 break;
1020 case WLAN_HT_CAP_SM_PS_INVALID:
1021 default: 1011 default:
1022 IWL_ERR(priv, "invalid sm_ps mode %u\n", 1012 WARN(1, "invalid SMPS mode %d",
1023 priv->cfg->sm_ps_mode); 1013 priv->current_ht_config.smps);
1024 WARN_ON(1); 1014 return active_cnt;
1025 break;
1026 } 1015 }
1027 return idle_cnt;
1028} 1016}
1029 1017
1030/* up to 4 chains */ 1018/* up to 4 chains */
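The rewritten iwl_get_idle_rx_chain_count() above keys the idle-chain decision off the SMPS mode mac80211 reports (tracked in current_ht_config.smps) instead of the removed per-device sm_ps_mode field. An illustration of the mapping, with a local enum and constant standing in for the real headers:

/*
 * Illustration only: names mirror the kernel ones, but the enum and the
 * NUM_IDLE_CHAINS_SINGLE constant are stand-ins, not the real definitions.
 */
#include <stdio.h>

enum smps_mode { SMPS_STATIC, SMPS_DYNAMIC, SMPS_OFF };

#define NUM_IDLE_CHAINS_SINGLE 1

static int idle_rx_chain_count(enum smps_mode smps, int active_cnt)
{
	switch (smps) {
	case SMPS_STATIC:
	case SMPS_DYNAMIC:
		/* SM power save: one chain is enough while idle */
		return NUM_IDLE_CHAINS_SINGLE;
	case SMPS_OFF:
		/* no SM power save: keep every active chain listening */
		return active_cnt;
	}
	return active_cnt;	/* unreachable for valid modes */
}

int main(void)
{
	printf("static: %d chain(s) idle, off: %d chain(s) idle\n",
	       idle_rx_chain_count(SMPS_STATIC, 3),
	       idle_rx_chain_count(SMPS_OFF, 3));
	return 0;
}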
@@ -1363,7 +1351,9 @@ void iwl_irq_handle_error(struct iwl_priv *priv)
1363 clear_bit(STATUS_HCMD_ACTIVE, &priv->status); 1351 clear_bit(STATUS_HCMD_ACTIVE, &priv->status);
1364 1352
1365 priv->cfg->ops->lib->dump_nic_error_log(priv); 1353 priv->cfg->ops->lib->dump_nic_error_log(priv);
1366 priv->cfg->ops->lib->dump_nic_event_log(priv, false); 1354 if (priv->cfg->ops->lib->dump_csr)
1355 priv->cfg->ops->lib->dump_csr(priv);
1356 priv->cfg->ops->lib->dump_nic_event_log(priv, false, NULL, false);
1367#ifdef CONFIG_IWLWIFI_DEBUG 1357#ifdef CONFIG_IWLWIFI_DEBUG
1368 if (iwl_get_debug_level(priv) & IWL_DL_FW_ERRORS) 1358 if (iwl_get_debug_level(priv) & IWL_DL_FW_ERRORS)
1369 iwl_print_rx_config_cmd(priv); 1359 iwl_print_rx_config_cmd(priv);
@@ -2599,12 +2589,12 @@ int iwl_set_mode(struct iwl_priv *priv, int mode)
2599EXPORT_SYMBOL(iwl_set_mode); 2589EXPORT_SYMBOL(iwl_set_mode);
2600 2590
2601int iwl_mac_add_interface(struct ieee80211_hw *hw, 2591int iwl_mac_add_interface(struct ieee80211_hw *hw,
2602 struct ieee80211_if_init_conf *conf) 2592 struct ieee80211_vif *vif)
2603{ 2593{
2604 struct iwl_priv *priv = hw->priv; 2594 struct iwl_priv *priv = hw->priv;
2605 unsigned long flags; 2595 unsigned long flags;
2606 2596
2607 IWL_DEBUG_MAC80211(priv, "enter: type %d\n", conf->type); 2597 IWL_DEBUG_MAC80211(priv, "enter: type %d\n", vif->type);
2608 2598
2609 if (priv->vif) { 2599 if (priv->vif) {
2610 IWL_DEBUG_MAC80211(priv, "leave - vif != NULL\n"); 2600 IWL_DEBUG_MAC80211(priv, "leave - vif != NULL\n");
@@ -2612,19 +2602,19 @@ int iwl_mac_add_interface(struct ieee80211_hw *hw,
2612 } 2602 }
2613 2603
2614 spin_lock_irqsave(&priv->lock, flags); 2604 spin_lock_irqsave(&priv->lock, flags);
2615 priv->vif = conf->vif; 2605 priv->vif = vif;
2616 priv->iw_mode = conf->type; 2606 priv->iw_mode = vif->type;
2617 2607
2618 spin_unlock_irqrestore(&priv->lock, flags); 2608 spin_unlock_irqrestore(&priv->lock, flags);
2619 2609
2620 mutex_lock(&priv->mutex); 2610 mutex_lock(&priv->mutex);
2621 2611
2622 if (conf->mac_addr) { 2612 if (vif->addr) {
2623 IWL_DEBUG_MAC80211(priv, "Set %pM\n", conf->mac_addr); 2613 IWL_DEBUG_MAC80211(priv, "Set %pM\n", vif->addr);
2624 memcpy(priv->mac_addr, conf->mac_addr, ETH_ALEN); 2614 memcpy(priv->mac_addr, vif->addr, ETH_ALEN);
2625 } 2615 }
2626 2616
2627 if (iwl_set_mode(priv, conf->type) == -EAGAIN) 2617 if (iwl_set_mode(priv, vif->type) == -EAGAIN)
2628 /* we are not ready, will run again when ready */ 2618 /* we are not ready, will run again when ready */
2629 set_bit(STATUS_MODE_PENDING, &priv->status); 2619 set_bit(STATUS_MODE_PENDING, &priv->status);
2630 2620
@@ -2636,7 +2626,7 @@ int iwl_mac_add_interface(struct ieee80211_hw *hw,
2636EXPORT_SYMBOL(iwl_mac_add_interface); 2626EXPORT_SYMBOL(iwl_mac_add_interface);
2637 2627
2638void iwl_mac_remove_interface(struct ieee80211_hw *hw, 2628void iwl_mac_remove_interface(struct ieee80211_hw *hw,
2639 struct ieee80211_if_init_conf *conf) 2629 struct ieee80211_vif *vif)
2640{ 2630{
2641 struct iwl_priv *priv = hw->priv; 2631 struct iwl_priv *priv = hw->priv;
2642 2632
@@ -2649,7 +2639,7 @@ void iwl_mac_remove_interface(struct ieee80211_hw *hw,
2649 priv->staging_rxon.filter_flags &= ~RXON_FILTER_ASSOC_MSK; 2639 priv->staging_rxon.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
2650 iwlcore_commit_rxon(priv); 2640 iwlcore_commit_rxon(priv);
2651 } 2641 }
2652 if (priv->vif == conf->vif) { 2642 if (priv->vif == vif) {
2653 priv->vif = NULL; 2643 priv->vif = NULL;
2654 memset(priv->bssid, 0, ETH_ALEN); 2644 memset(priv->bssid, 0, ETH_ALEN);
2655 } 2645 }
@@ -2689,6 +2679,21 @@ int iwl_mac_config(struct ieee80211_hw *hw, u32 changed)
2689 IWL_DEBUG_MAC80211(priv, "leave - scanning\n"); 2679 IWL_DEBUG_MAC80211(priv, "leave - scanning\n");
2690 } 2680 }
2691 2681
2682 if (changed & (IEEE80211_CONF_CHANGE_SMPS |
2683 IEEE80211_CONF_CHANGE_CHANNEL)) {
2684 /* mac80211 uses static for non-HT which is what we want */
2685 priv->current_ht_config.smps = conf->smps_mode;
2686
2687 /*
2688 * Recalculate chain counts.
2689 *
2690 * If monitor mode is enabled then mac80211 will
2691 * set up the SM PS mode to OFF if an HT channel is
2692 * configured.
2693 */
2694 if (priv->cfg->ops->hcmd->set_rxon_chain)
2695 priv->cfg->ops->hcmd->set_rxon_chain(priv);
2696 }
2692 2697
2693 /* during scanning mac80211 will delay channel setting until 2698 /* during scanning mac80211 will delay channel setting until
2694 * scan finish with changed = 0 2699 * scan finish with changed = 0
@@ -2785,10 +2790,6 @@ int iwl_mac_config(struct ieee80211_hw *hw, u32 changed)
2785 iwl_set_tx_power(priv, conf->power_level, false); 2790 iwl_set_tx_power(priv, conf->power_level, false);
2786 } 2791 }
2787 2792
2788 /* call to ensure that 4965 rx_chain is set properly in monitor mode */
2789 if (priv->cfg->ops->hcmd->set_rxon_chain)
2790 priv->cfg->ops->hcmd->set_rxon_chain(priv);
2791
2792 if (!iwl_is_ready(priv)) { 2793 if (!iwl_is_ready(priv)) {
2793 IWL_DEBUG_MAC80211(priv, "leave - not ready\n"); 2794 IWL_DEBUG_MAC80211(priv, "leave - not ready\n");
2794 goto out; 2795 goto out;
@@ -3196,6 +3197,77 @@ void iwl_update_stats(struct iwl_priv *priv, bool is_tx, __le16 fc, u16 len)
3196EXPORT_SYMBOL(iwl_update_stats); 3197EXPORT_SYMBOL(iwl_update_stats);
3197#endif 3198#endif
3198 3199
3200const static char *get_csr_string(int cmd)
3201{
3202 switch (cmd) {
3203 IWL_CMD(CSR_HW_IF_CONFIG_REG);
3204 IWL_CMD(CSR_INT_COALESCING);
3205 IWL_CMD(CSR_INT);
3206 IWL_CMD(CSR_INT_MASK);
3207 IWL_CMD(CSR_FH_INT_STATUS);
3208 IWL_CMD(CSR_GPIO_IN);
3209 IWL_CMD(CSR_RESET);
3210 IWL_CMD(CSR_GP_CNTRL);
3211 IWL_CMD(CSR_HW_REV);
3212 IWL_CMD(CSR_EEPROM_REG);
3213 IWL_CMD(CSR_EEPROM_GP);
3214 IWL_CMD(CSR_OTP_GP_REG);
3215 IWL_CMD(CSR_GIO_REG);
3216 IWL_CMD(CSR_GP_UCODE_REG);
3217 IWL_CMD(CSR_GP_DRIVER_REG);
3218 IWL_CMD(CSR_UCODE_DRV_GP1);
3219 IWL_CMD(CSR_UCODE_DRV_GP2);
3220 IWL_CMD(CSR_LED_REG);
3221 IWL_CMD(CSR_DRAM_INT_TBL_REG);
3222 IWL_CMD(CSR_GIO_CHICKEN_BITS);
3223 IWL_CMD(CSR_ANA_PLL_CFG);
3224 IWL_CMD(CSR_HW_REV_WA_REG);
3225 IWL_CMD(CSR_DBG_HPET_MEM_REG);
3226 default:
3227 return "UNKNOWN";
3228
3229 }
3230}
3231
3232void iwl_dump_csr(struct iwl_priv *priv)
3233{
3234 int i;
3235 u32 csr_tbl[] = {
3236 CSR_HW_IF_CONFIG_REG,
3237 CSR_INT_COALESCING,
3238 CSR_INT,
3239 CSR_INT_MASK,
3240 CSR_FH_INT_STATUS,
3241 CSR_GPIO_IN,
3242 CSR_RESET,
3243 CSR_GP_CNTRL,
3244 CSR_HW_REV,
3245 CSR_EEPROM_REG,
3246 CSR_EEPROM_GP,
3247 CSR_OTP_GP_REG,
3248 CSR_GIO_REG,
3249 CSR_GP_UCODE_REG,
3250 CSR_GP_DRIVER_REG,
3251 CSR_UCODE_DRV_GP1,
3252 CSR_UCODE_DRV_GP2,
3253 CSR_LED_REG,
3254 CSR_DRAM_INT_TBL_REG,
3255 CSR_GIO_CHICKEN_BITS,
3256 CSR_ANA_PLL_CFG,
3257 CSR_HW_REV_WA_REG,
3258 CSR_DBG_HPET_MEM_REG
3259 };
3260 IWL_ERR(priv, "CSR values:\n");
3261 IWL_ERR(priv, "(2nd byte of CSR_INT_COALESCING is "
3262 "CSR_INT_PERIODIC_REG)\n");
3263 for (i = 0; i < ARRAY_SIZE(csr_tbl); i++) {
3264 IWL_ERR(priv, " %25s: 0X%08x\n",
3265 get_csr_string(csr_tbl[i]),
3266 iwl_read32(priv, csr_tbl[i]));
3267 }
3268}
3269EXPORT_SYMBOL(iwl_dump_csr);
3270
3199#ifdef CONFIG_PM 3271#ifdef CONFIG_PM
3200 3272
3201int iwl_pci_suspend(struct pci_dev *pdev, pm_message_t state) 3273int iwl_pci_suspend(struct pci_dev *pdev, pm_message_t state)
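The new iwl_dump_csr() walks a table of CSR offsets and prints each one next to a name obtained from get_csr_string(), which is built out of an IWL_CMD()-style macro, presumably expanding to "case x: return #x". The generic pattern, with hypothetical REG_ALPHA/REG_BETA/REG_NAME names in place of the driver's:

/* Sketch of the stringifying-switch pattern; names and offsets are made up. */
#include <stdio.h>

#define REG_ALPHA 0x000
#define REG_BETA  0x004

#define REG_NAME(r) case r: return #r

static const char *reg_name(int reg)
{
	switch (reg) {
	REG_NAME(REG_ALPHA);
	REG_NAME(REG_BETA);
	default:
		return "UNKNOWN";
	}
}

int main(void)
{
	static const int regs[] = { REG_ALPHA, REG_BETA, 0x008 };

	for (unsigned int i = 0; i < sizeof(regs) / sizeof(regs[0]); i++)
		printf("%10s: 0x%03x\n", reg_name(regs[i]), regs[i]);
	return 0;
}

Keeping the offset table and the name switch in step is the only maintenance cost; unknown registers simply print as "UNKNOWN", matching the default case in get_csr_string().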
diff --git a/drivers/net/wireless/iwlwifi/iwl-core.h b/drivers/net/wireless/iwlwifi/iwl-core.h
index 27ca859e745..8deb83bfe18 100644
--- a/drivers/net/wireless/iwlwifi/iwl-core.h
+++ b/drivers/net/wireless/iwlwifi/iwl-core.h
@@ -63,8 +63,6 @@
63#ifndef __iwl_core_h__ 63#ifndef __iwl_core_h__
64#define __iwl_core_h__ 64#define __iwl_core_h__
65 65
66#include <generated/utsrelease.h>
67
68/************************ 66/************************
69 * forward declarations * 67 * forward declarations *
70 ************************/ 68 ************************/
@@ -72,7 +70,7 @@ struct iwl_host_cmd;
72struct iwl_cmd; 70struct iwl_cmd;
73 71
74 72
75#define IWLWIFI_VERSION UTS_RELEASE "-k" 73#define IWLWIFI_VERSION "in-tree:"
76#define DRV_COPYRIGHT "Copyright(c) 2003-2009 Intel Corporation" 74#define DRV_COPYRIGHT "Copyright(c) 2003-2009 Intel Corporation"
77#define DRV_AUTHOR "<ilw@linux.intel.com>" 75#define DRV_AUTHOR "<ilw@linux.intel.com>"
78 76
@@ -169,8 +167,10 @@ struct iwl_lib_ops {
169 int (*is_valid_rtc_data_addr)(u32 addr); 167 int (*is_valid_rtc_data_addr)(u32 addr);
170 /* 1st ucode load */ 168 /* 1st ucode load */
171 int (*load_ucode)(struct iwl_priv *priv); 169 int (*load_ucode)(struct iwl_priv *priv);
172 void (*dump_nic_event_log)(struct iwl_priv *priv, bool full_log); 170 int (*dump_nic_event_log)(struct iwl_priv *priv,
171 bool full_log, char **buf, bool display);
173 void (*dump_nic_error_log)(struct iwl_priv *priv); 172 void (*dump_nic_error_log)(struct iwl_priv *priv);
173 void (*dump_csr)(struct iwl_priv *priv);
174 int (*set_channel_switch)(struct iwl_priv *priv, u16 channel); 174 int (*set_channel_switch)(struct iwl_priv *priv, u16 channel);
175 /* power management */ 175 /* power management */
176 struct iwl_apm_ops apm_ops; 176 struct iwl_apm_ops apm_ops;
@@ -230,7 +230,6 @@ struct iwl_mod_params {
230 * @chain_noise_num_beacons: number of beacons used to compute chain noise 230 * @chain_noise_num_beacons: number of beacons used to compute chain noise
231 * @adv_thermal_throttle: support advance thermal throttle 231 * @adv_thermal_throttle: support advance thermal throttle
232 * @support_ct_kill_exit: support ct kill exit condition 232 * @support_ct_kill_exit: support ct kill exit condition
233 * @sm_ps_mode: spatial multiplexing power save mode
234 * @support_wimax_coexist: support wimax/wifi co-exist 233 * @support_wimax_coexist: support wimax/wifi co-exist
235 * 234 *
236 * We enable the driver to be backward compatible wrt API version. The 235 * We enable the driver to be backward compatible wrt API version. The
@@ -287,7 +286,6 @@ struct iwl_cfg {
287 const bool supports_idle; 286 const bool supports_idle;
288 bool adv_thermal_throttle; 287 bool adv_thermal_throttle;
289 bool support_ct_kill_exit; 288 bool support_ct_kill_exit;
290 u8 sm_ps_mode;
291 const bool support_wimax_coexist; 289 const bool support_wimax_coexist;
292}; 290};
293 291
@@ -332,9 +330,9 @@ int iwl_mac_beacon_update(struct ieee80211_hw *hw, struct sk_buff *skb);
332int iwl_commit_rxon(struct iwl_priv *priv); 330int iwl_commit_rxon(struct iwl_priv *priv);
333int iwl_set_mode(struct iwl_priv *priv, int mode); 331int iwl_set_mode(struct iwl_priv *priv, int mode);
334int iwl_mac_add_interface(struct ieee80211_hw *hw, 332int iwl_mac_add_interface(struct ieee80211_hw *hw,
335 struct ieee80211_if_init_conf *conf); 333 struct ieee80211_vif *vif);
336void iwl_mac_remove_interface(struct ieee80211_hw *hw, 334void iwl_mac_remove_interface(struct ieee80211_hw *hw,
337 struct ieee80211_if_init_conf *conf); 335 struct ieee80211_vif *vif);
338int iwl_mac_config(struct ieee80211_hw *hw, u32 changed); 336int iwl_mac_config(struct ieee80211_hw *hw, u32 changed);
339void iwl_config_ap(struct iwl_priv *priv); 337void iwl_config_ap(struct iwl_priv *priv);
340int iwl_mac_get_tx_stats(struct ieee80211_hw *hw, 338int iwl_mac_get_tx_stats(struct ieee80211_hw *hw,
@@ -581,7 +579,9 @@ int iwl_pci_resume(struct pci_dev *pdev);
581* Error Handling Debugging 579* Error Handling Debugging
582******************************************************/ 580******************************************************/
583void iwl_dump_nic_error_log(struct iwl_priv *priv); 581void iwl_dump_nic_error_log(struct iwl_priv *priv);
584void iwl_dump_nic_event_log(struct iwl_priv *priv, bool full_log); 582int iwl_dump_nic_event_log(struct iwl_priv *priv,
583 bool full_log, char **buf, bool display);
584void iwl_dump_csr(struct iwl_priv *priv);
585#ifdef CONFIG_IWLWIFI_DEBUG 585#ifdef CONFIG_IWLWIFI_DEBUG
586void iwl_print_rx_config_cmd(struct iwl_priv *priv); 586void iwl_print_rx_config_cmd(struct iwl_priv *priv);
587#else 587#else
diff --git a/drivers/net/wireless/iwlwifi/iwl-debug.h b/drivers/net/wireless/iwlwifi/iwl-debug.h
index d61293ab67c..58e0462cafa 100644
--- a/drivers/net/wireless/iwlwifi/iwl-debug.h
+++ b/drivers/net/wireless/iwlwifi/iwl-debug.h
@@ -109,6 +109,8 @@ struct iwl_debugfs {
109 struct dentry *file_power_save_status; 109 struct dentry *file_power_save_status;
110 struct dentry *file_clear_ucode_statistics; 110 struct dentry *file_clear_ucode_statistics;
111 struct dentry *file_clear_traffic_statistics; 111 struct dentry *file_clear_traffic_statistics;
112 struct dentry *file_csr;
113 struct dentry *file_ucode_tracing;
112 } dbgfs_debug_files; 114 } dbgfs_debug_files;
113 u32 sram_offset; 115 u32 sram_offset;
114 u32 sram_len; 116 u32 sram_len;
diff --git a/drivers/net/wireless/iwlwifi/iwl-debugfs.c b/drivers/net/wireless/iwlwifi/iwl-debugfs.c
index 21e0f6699da..4a2ac9311ba 100644
--- a/drivers/net/wireless/iwlwifi/iwl-debugfs.c
+++ b/drivers/net/wireless/iwlwifi/iwl-debugfs.c
@@ -125,7 +125,7 @@ static ssize_t iwl_dbgfs_tx_statistics_read(struct file *file,
125 char __user *user_buf, 125 char __user *user_buf,
126 size_t count, loff_t *ppos) { 126 size_t count, loff_t *ppos) {
127 127
128 struct iwl_priv *priv = (struct iwl_priv *)file->private_data; 128 struct iwl_priv *priv = file->private_data;
129 char *buf; 129 char *buf;
130 int pos = 0; 130 int pos = 0;
131 131
@@ -184,7 +184,7 @@ static ssize_t iwl_dbgfs_rx_statistics_read(struct file *file,
184 char __user *user_buf, 184 char __user *user_buf,
185 size_t count, loff_t *ppos) { 185 size_t count, loff_t *ppos) {
186 186
187 struct iwl_priv *priv = (struct iwl_priv *)file->private_data; 187 struct iwl_priv *priv = file->private_data;
188 char *buf; 188 char *buf;
189 int pos = 0; 189 int pos = 0;
190 int cnt; 190 int cnt;
@@ -232,7 +232,7 @@ static ssize_t iwl_dbgfs_sram_read(struct file *file,
232 ssize_t ret; 232 ssize_t ret;
233 int i; 233 int i;
234 int pos = 0; 234 int pos = 0;
235 struct iwl_priv *priv = (struct iwl_priv *)file->private_data; 235 struct iwl_priv *priv = file->private_data;
236 size_t bufsz; 236 size_t bufsz;
237 237
238 /* default is to dump the entire data segment */ 238 /* default is to dump the entire data segment */
@@ -306,7 +306,7 @@ static ssize_t iwl_dbgfs_sram_write(struct file *file,
306static ssize_t iwl_dbgfs_stations_read(struct file *file, char __user *user_buf, 306static ssize_t iwl_dbgfs_stations_read(struct file *file, char __user *user_buf,
307 size_t count, loff_t *ppos) 307 size_t count, loff_t *ppos)
308{ 308{
309 struct iwl_priv *priv = (struct iwl_priv *)file->private_data; 309 struct iwl_priv *priv = file->private_data;
310 struct iwl_station_entry *station; 310 struct iwl_station_entry *station;
311 int max_sta = priv->hw_params.max_stations; 311 int max_sta = priv->hw_params.max_stations;
312 char *buf; 312 char *buf;
@@ -376,7 +376,7 @@ static ssize_t iwl_dbgfs_nvm_read(struct file *file,
376 loff_t *ppos) 376 loff_t *ppos)
377{ 377{
378 ssize_t ret; 378 ssize_t ret;
379 struct iwl_priv *priv = (struct iwl_priv *)file->private_data; 379 struct iwl_priv *priv = file->private_data;
380 int pos = 0, ofs = 0, buf_size = 0; 380 int pos = 0, ofs = 0, buf_size = 0;
381 const u8 *ptr; 381 const u8 *ptr;
382 char *buf; 382 char *buf;
@@ -420,6 +420,23 @@ static ssize_t iwl_dbgfs_nvm_read(struct file *file,
420 return ret; 420 return ret;
421} 421}
422 422
423static ssize_t iwl_dbgfs_log_event_read(struct file *file,
424 char __user *user_buf,
425 size_t count, loff_t *ppos)
426{
427 struct iwl_priv *priv = file->private_data;
428 char *buf;
429 int pos = 0;
430 ssize_t ret = -ENOMEM;
431
432 pos = priv->cfg->ops->lib->dump_nic_event_log(priv, true, &buf, true);
433 if (pos && buf) {
434 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
435 kfree(buf);
436 }
437 return ret;
438}
439
423static ssize_t iwl_dbgfs_log_event_write(struct file *file, 440static ssize_t iwl_dbgfs_log_event_write(struct file *file,
424 const char __user *user_buf, 441 const char __user *user_buf,
425 size_t count, loff_t *ppos) 442 size_t count, loff_t *ppos)
@@ -436,7 +453,8 @@ static ssize_t iwl_dbgfs_log_event_write(struct file *file,
436 if (sscanf(buf, "%d", &event_log_flag) != 1) 453 if (sscanf(buf, "%d", &event_log_flag) != 1)
437 return -EFAULT; 454 return -EFAULT;
438 if (event_log_flag == 1) 455 if (event_log_flag == 1)
439 priv->cfg->ops->lib->dump_nic_event_log(priv, true); 456 priv->cfg->ops->lib->dump_nic_event_log(priv, true,
457 NULL, false);
440 458
441 return count; 459 return count;
442} 460}
@@ -446,7 +464,7 @@ static ssize_t iwl_dbgfs_log_event_write(struct file *file,
446static ssize_t iwl_dbgfs_channels_read(struct file *file, char __user *user_buf, 464static ssize_t iwl_dbgfs_channels_read(struct file *file, char __user *user_buf,
447 size_t count, loff_t *ppos) 465 size_t count, loff_t *ppos)
448{ 466{
449 struct iwl_priv *priv = (struct iwl_priv *)file->private_data; 467 struct iwl_priv *priv = file->private_data;
450 struct ieee80211_channel *channels = NULL; 468 struct ieee80211_channel *channels = NULL;
451 const struct ieee80211_supported_band *supp_band = NULL; 469 const struct ieee80211_supported_band *supp_band = NULL;
452 int pos = 0, i, bufsz = PAGE_SIZE; 470 int pos = 0, i, bufsz = PAGE_SIZE;
@@ -519,7 +537,7 @@ static ssize_t iwl_dbgfs_status_read(struct file *file,
519 char __user *user_buf, 537 char __user *user_buf,
520 size_t count, loff_t *ppos) { 538 size_t count, loff_t *ppos) {
521 539
522 struct iwl_priv *priv = (struct iwl_priv *)file->private_data; 540 struct iwl_priv *priv = file->private_data;
523 char buf[512]; 541 char buf[512];
524 int pos = 0; 542 int pos = 0;
525 const size_t bufsz = sizeof(buf); 543 const size_t bufsz = sizeof(buf);
@@ -567,7 +585,7 @@ static ssize_t iwl_dbgfs_interrupt_read(struct file *file,
567 char __user *user_buf, 585 char __user *user_buf,
568 size_t count, loff_t *ppos) { 586 size_t count, loff_t *ppos) {
569 587
570 struct iwl_priv *priv = (struct iwl_priv *)file->private_data; 588 struct iwl_priv *priv = file->private_data;
571 int pos = 0; 589 int pos = 0;
572 int cnt = 0; 590 int cnt = 0;
573 char *buf; 591 char *buf;
@@ -654,7 +672,7 @@ static ssize_t iwl_dbgfs_interrupt_write(struct file *file,
654static ssize_t iwl_dbgfs_qos_read(struct file *file, char __user *user_buf, 672static ssize_t iwl_dbgfs_qos_read(struct file *file, char __user *user_buf,
655 size_t count, loff_t *ppos) 673 size_t count, loff_t *ppos)
656{ 674{
657 struct iwl_priv *priv = (struct iwl_priv *)file->private_data; 675 struct iwl_priv *priv = file->private_data;
658 int pos = 0, i; 676 int pos = 0, i;
659 char buf[256]; 677 char buf[256];
660 const size_t bufsz = sizeof(buf); 678 const size_t bufsz = sizeof(buf);
@@ -677,7 +695,7 @@ static ssize_t iwl_dbgfs_qos_read(struct file *file, char __user *user_buf,
677static ssize_t iwl_dbgfs_led_read(struct file *file, char __user *user_buf, 695static ssize_t iwl_dbgfs_led_read(struct file *file, char __user *user_buf,
678 size_t count, loff_t *ppos) 696 size_t count, loff_t *ppos)
679{ 697{
680 struct iwl_priv *priv = (struct iwl_priv *)file->private_data; 698 struct iwl_priv *priv = file->private_data;
681 int pos = 0; 699 int pos = 0;
682 char buf[256]; 700 char buf[256];
683 const size_t bufsz = sizeof(buf); 701 const size_t bufsz = sizeof(buf);
@@ -703,7 +721,7 @@ static ssize_t iwl_dbgfs_thermal_throttling_read(struct file *file,
703 char __user *user_buf, 721 char __user *user_buf,
704 size_t count, loff_t *ppos) 722 size_t count, loff_t *ppos)
705{ 723{
706 struct iwl_priv *priv = (struct iwl_priv *)file->private_data; 724 struct iwl_priv *priv = file->private_data;
707 struct iwl_tt_mgmt *tt = &priv->thermal_throttle; 725 struct iwl_tt_mgmt *tt = &priv->thermal_throttle;
708 struct iwl_tt_restriction *restriction; 726 struct iwl_tt_restriction *restriction;
709 char buf[100]; 727 char buf[100];
@@ -763,7 +781,7 @@ static ssize_t iwl_dbgfs_disable_ht40_read(struct file *file,
763 char __user *user_buf, 781 char __user *user_buf,
764 size_t count, loff_t *ppos) 782 size_t count, loff_t *ppos)
765{ 783{
766 struct iwl_priv *priv = (struct iwl_priv *)file->private_data; 784 struct iwl_priv *priv = file->private_data;
767 char buf[100]; 785 char buf[100];
768 int pos = 0; 786 int pos = 0;
769 const size_t bufsz = sizeof(buf); 787 const size_t bufsz = sizeof(buf);
@@ -820,7 +838,7 @@ static ssize_t iwl_dbgfs_sleep_level_override_read(struct file *file,
820 char __user *user_buf, 838 char __user *user_buf,
821 size_t count, loff_t *ppos) 839 size_t count, loff_t *ppos)
822{ 840{
823 struct iwl_priv *priv = (struct iwl_priv *)file->private_data; 841 struct iwl_priv *priv = file->private_data;
824 char buf[10]; 842 char buf[10];
825 int pos, value; 843 int pos, value;
826 const size_t bufsz = sizeof(buf); 844 const size_t bufsz = sizeof(buf);
@@ -838,7 +856,7 @@ static ssize_t iwl_dbgfs_current_sleep_command_read(struct file *file,
838 char __user *user_buf, 856 char __user *user_buf,
839 size_t count, loff_t *ppos) 857 size_t count, loff_t *ppos)
840{ 858{
841 struct iwl_priv *priv = (struct iwl_priv *)file->private_data; 859 struct iwl_priv *priv = file->private_data;
842 char buf[200]; 860 char buf[200];
843 int pos = 0, i; 861 int pos = 0, i;
844 const size_t bufsz = sizeof(buf); 862 const size_t bufsz = sizeof(buf);
@@ -859,7 +877,7 @@ static ssize_t iwl_dbgfs_current_sleep_command_read(struct file *file,
859} 877}
860 878
861DEBUGFS_READ_WRITE_FILE_OPS(sram); 879DEBUGFS_READ_WRITE_FILE_OPS(sram);
862DEBUGFS_WRITE_FILE_OPS(log_event); 880DEBUGFS_READ_WRITE_FILE_OPS(log_event);
863DEBUGFS_READ_FILE_OPS(nvm); 881DEBUGFS_READ_FILE_OPS(nvm);
864DEBUGFS_READ_FILE_OPS(stations); 882DEBUGFS_READ_FILE_OPS(stations);
865DEBUGFS_READ_FILE_OPS(channels); 883DEBUGFS_READ_FILE_OPS(channels);
@@ -976,7 +994,7 @@ static ssize_t iwl_dbgfs_tx_queue_read(struct file *file,
976 char __user *user_buf, 994 char __user *user_buf,
977 size_t count, loff_t *ppos) { 995 size_t count, loff_t *ppos) {
978 996
979 struct iwl_priv *priv = (struct iwl_priv *)file->private_data; 997 struct iwl_priv *priv = file->private_data;
980 struct iwl_tx_queue *txq; 998 struct iwl_tx_queue *txq;
981 struct iwl_queue *q; 999 struct iwl_queue *q;
982 char *buf; 1000 char *buf;
@@ -1022,7 +1040,7 @@ static ssize_t iwl_dbgfs_rx_queue_read(struct file *file,
1022 char __user *user_buf, 1040 char __user *user_buf,
1023 size_t count, loff_t *ppos) { 1041 size_t count, loff_t *ppos) {
1024 1042
1025 struct iwl_priv *priv = (struct iwl_priv *)file->private_data; 1043 struct iwl_priv *priv = file->private_data;
1026 struct iwl_rx_queue *rxq = &priv->rxq; 1044 struct iwl_rx_queue *rxq = &priv->rxq;
1027 char buf[256]; 1045 char buf[256];
1028 int pos = 0; 1046 int pos = 0;
@@ -1068,7 +1086,7 @@ static ssize_t iwl_dbgfs_ucode_rx_stats_read(struct file *file,
1068 char __user *user_buf, 1086 char __user *user_buf,
1069 size_t count, loff_t *ppos) 1087 size_t count, loff_t *ppos)
1070{ 1088{
1071 struct iwl_priv *priv = (struct iwl_priv *)file->private_data; 1089 struct iwl_priv *priv = file->private_data;
1072 int pos = 0; 1090 int pos = 0;
1073 char *buf; 1091 char *buf;
1074 int bufsz = sizeof(struct statistics_rx_phy) * 20 + 1092 int bufsz = sizeof(struct statistics_rx_phy) * 20 +
@@ -1369,6 +1387,9 @@ static ssize_t iwl_dbgfs_ucode_rx_stats_read(struct file *file,
1369 accum_ht->agg_mpdu_cnt); 1387 accum_ht->agg_mpdu_cnt);
1370 pos += scnprintf(buf + pos, bufsz - pos, "agg_cnt:\t\t%u\t\t\t%u\n", 1388 pos += scnprintf(buf + pos, bufsz - pos, "agg_cnt:\t\t%u\t\t\t%u\n",
1371 le32_to_cpu(ht->agg_cnt), accum_ht->agg_cnt); 1389 le32_to_cpu(ht->agg_cnt), accum_ht->agg_cnt);
1390 pos += scnprintf(buf + pos, bufsz - pos, "unsupport_mcs:\t\t%u\t\t\t%u\n",
1391 le32_to_cpu(ht->unsupport_mcs),
1392 accum_ht->unsupport_mcs);
1372 1393
1373 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos); 1394 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
1374 kfree(buf); 1395 kfree(buf);
@@ -1379,7 +1400,7 @@ static ssize_t iwl_dbgfs_ucode_tx_stats_read(struct file *file,
1379 char __user *user_buf, 1400 char __user *user_buf,
1380 size_t count, loff_t *ppos) 1401 size_t count, loff_t *ppos)
1381{ 1402{
1382 struct iwl_priv *priv = (struct iwl_priv *)file->private_data; 1403 struct iwl_priv *priv = file->private_data;
1383 int pos = 0; 1404 int pos = 0;
1384 char *buf; 1405 char *buf;
1385 int bufsz = (sizeof(struct statistics_tx) * 24) + 250; 1406 int bufsz = (sizeof(struct statistics_tx) * 24) + 250;
@@ -1521,7 +1542,7 @@ static ssize_t iwl_dbgfs_ucode_general_stats_read(struct file *file,
1521 char __user *user_buf, 1542 char __user *user_buf,
1522 size_t count, loff_t *ppos) 1543 size_t count, loff_t *ppos)
1523{ 1544{
1524 struct iwl_priv *priv = (struct iwl_priv *)file->private_data; 1545 struct iwl_priv *priv = file->private_data;
1525 int pos = 0; 1546 int pos = 0;
1526 char *buf; 1547 char *buf;
1527 int bufsz = sizeof(struct statistics_general) * 4 + 250; 1548 int bufsz = sizeof(struct statistics_general) * 4 + 250;
@@ -1612,7 +1633,7 @@ static ssize_t iwl_dbgfs_sensitivity_read(struct file *file,
1612 char __user *user_buf, 1633 char __user *user_buf,
1613 size_t count, loff_t *ppos) { 1634 size_t count, loff_t *ppos) {
1614 1635
1615 struct iwl_priv *priv = (struct iwl_priv *)file->private_data; 1636 struct iwl_priv *priv = file->private_data;
1616 int pos = 0; 1637 int pos = 0;
1617 int cnt = 0; 1638 int cnt = 0;
1618 char *buf; 1639 char *buf;
@@ -1693,7 +1714,7 @@ static ssize_t iwl_dbgfs_chain_noise_read(struct file *file,
1693 char __user *user_buf, 1714 char __user *user_buf,
1694 size_t count, loff_t *ppos) { 1715 size_t count, loff_t *ppos) {
1695 1716
1696 struct iwl_priv *priv = (struct iwl_priv *)file->private_data; 1717 struct iwl_priv *priv = file->private_data;
1697 int pos = 0; 1718 int pos = 0;
1698 int cnt = 0; 1719 int cnt = 0;
1699 char *buf; 1720 char *buf;
@@ -1751,7 +1772,7 @@ static ssize_t iwl_dbgfs_tx_power_read(struct file *file,
1751 char __user *user_buf, 1772 char __user *user_buf,
1752 size_t count, loff_t *ppos) { 1773 size_t count, loff_t *ppos) {
1753 1774
1754 struct iwl_priv *priv = (struct iwl_priv *)file->private_data; 1775 struct iwl_priv *priv = file->private_data;
1755 char buf[128]; 1776 char buf[128];
1756 int pos = 0; 1777 int pos = 0;
1757 ssize_t ret; 1778 ssize_t ret;
@@ -1802,7 +1823,7 @@ static ssize_t iwl_dbgfs_power_save_status_read(struct file *file,
1802 char __user *user_buf, 1823 char __user *user_buf,
1803 size_t count, loff_t *ppos) 1824 size_t count, loff_t *ppos)
1804{ 1825{
1805 struct iwl_priv *priv = (struct iwl_priv *)file->private_data; 1826 struct iwl_priv *priv = file->private_data;
1806 char buf[60]; 1827 char buf[60];
1807 int pos = 0; 1828 int pos = 0;
1808 const size_t bufsz = sizeof(buf); 1829 const size_t bufsz = sizeof(buf);
@@ -1845,6 +1866,80 @@ static ssize_t iwl_dbgfs_clear_ucode_statistics_write(struct file *file,
1845 return count; 1866 return count;
1846} 1867}
1847 1868
1869static ssize_t iwl_dbgfs_csr_write(struct file *file,
1870 const char __user *user_buf,
1871 size_t count, loff_t *ppos)
1872{
1873 struct iwl_priv *priv = file->private_data;
1874 char buf[8];
1875 int buf_size;
1876 int csr;
1877
1878 memset(buf, 0, sizeof(buf));
1879 buf_size = min(count, sizeof(buf) - 1);
1880 if (copy_from_user(buf, user_buf, buf_size))
1881 return -EFAULT;
1882 if (sscanf(buf, "%d", &csr) != 1)
1883 return -EFAULT;
1884
1885 if (priv->cfg->ops->lib->dump_csr)
1886 priv->cfg->ops->lib->dump_csr(priv);
1887
1888 return count;
1889}
1890
1891static ssize_t iwl_dbgfs_ucode_tracing_read(struct file *file,
1892 char __user *user_buf,
1893 size_t count, loff_t *ppos) {
1894
1895 struct iwl_priv *priv = (struct iwl_priv *)file->private_data;
1896 int pos = 0;
1897 char buf[128];
1898 const size_t bufsz = sizeof(buf);
1899 ssize_t ret;
1900
1901 pos += scnprintf(buf + pos, bufsz - pos, "ucode trace timer is %s\n",
1902 priv->event_log.ucode_trace ? "On" : "Off");
1903 pos += scnprintf(buf + pos, bufsz - pos, "non_wraps_count:\t\t %u\n",
1904 priv->event_log.non_wraps_count);
1905 pos += scnprintf(buf + pos, bufsz - pos, "wraps_once_count:\t\t %u\n",
1906 priv->event_log.wraps_once_count);
1907 pos += scnprintf(buf + pos, bufsz - pos, "wraps_more_count:\t\t %u\n",
1908 priv->event_log.wraps_more_count);
1909
1910 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
1911 return ret;
1912}
1913
1914static ssize_t iwl_dbgfs_ucode_tracing_write(struct file *file,
1915 const char __user *user_buf,
1916 size_t count, loff_t *ppos)
1917{
1918 struct iwl_priv *priv = file->private_data;
1919 char buf[8];
1920 int buf_size;
1921 int trace;
1922
1923 memset(buf, 0, sizeof(buf));
1924 buf_size = min(count, sizeof(buf) - 1);
1925 if (copy_from_user(buf, user_buf, buf_size))
1926 return -EFAULT;
1927 if (sscanf(buf, "%d", &trace) != 1)
1928 return -EFAULT;
1929
1930 if (trace) {
1931 priv->event_log.ucode_trace = true;
1932 /* schedule the ucode timer to occur in UCODE_TRACE_PERIOD */
1933 mod_timer(&priv->ucode_trace,
1934 jiffies + msecs_to_jiffies(UCODE_TRACE_PERIOD));
1935 } else {
1936 priv->event_log.ucode_trace = false;
1937 del_timer_sync(&priv->ucode_trace);
1938 }
1939
1940 return count;
1941}
1942
1848DEBUGFS_READ_FILE_OPS(rx_statistics); 1943DEBUGFS_READ_FILE_OPS(rx_statistics);
1849DEBUGFS_READ_FILE_OPS(tx_statistics); 1944DEBUGFS_READ_FILE_OPS(tx_statistics);
1850DEBUGFS_READ_WRITE_FILE_OPS(traffic_log); 1945DEBUGFS_READ_WRITE_FILE_OPS(traffic_log);
@@ -1859,6 +1954,8 @@ DEBUGFS_READ_FILE_OPS(tx_power);
1859DEBUGFS_READ_FILE_OPS(power_save_status); 1954DEBUGFS_READ_FILE_OPS(power_save_status);
1860DEBUGFS_WRITE_FILE_OPS(clear_ucode_statistics); 1955DEBUGFS_WRITE_FILE_OPS(clear_ucode_statistics);
1861DEBUGFS_WRITE_FILE_OPS(clear_traffic_statistics); 1956DEBUGFS_WRITE_FILE_OPS(clear_traffic_statistics);
1957DEBUGFS_WRITE_FILE_OPS(csr);
1958DEBUGFS_READ_WRITE_FILE_OPS(ucode_tracing);
1862 1959
1863/* 1960/*
1864 * Create the debugfs files and directories 1961 * Create the debugfs files and directories
@@ -1889,7 +1986,7 @@ int iwl_dbgfs_register(struct iwl_priv *priv, const char *name)
1889 DEBUGFS_ADD_DIR(debug, dbgfs->dir_drv); 1986 DEBUGFS_ADD_DIR(debug, dbgfs->dir_drv);
1890 DEBUGFS_ADD_FILE(nvm, data, S_IRUSR); 1987 DEBUGFS_ADD_FILE(nvm, data, S_IRUSR);
1891 DEBUGFS_ADD_FILE(sram, data, S_IWUSR | S_IRUSR); 1988 DEBUGFS_ADD_FILE(sram, data, S_IWUSR | S_IRUSR);
1892 DEBUGFS_ADD_FILE(log_event, data, S_IWUSR); 1989 DEBUGFS_ADD_FILE(log_event, data, S_IWUSR | S_IRUSR);
1893 DEBUGFS_ADD_FILE(stations, data, S_IRUSR); 1990 DEBUGFS_ADD_FILE(stations, data, S_IRUSR);
1894 DEBUGFS_ADD_FILE(channels, data, S_IRUSR); 1991 DEBUGFS_ADD_FILE(channels, data, S_IRUSR);
1895 DEBUGFS_ADD_FILE(status, data, S_IRUSR); 1992 DEBUGFS_ADD_FILE(status, data, S_IRUSR);
@@ -1909,12 +2006,14 @@ int iwl_dbgfs_register(struct iwl_priv *priv, const char *name)
1909 DEBUGFS_ADD_FILE(power_save_status, debug, S_IRUSR); 2006 DEBUGFS_ADD_FILE(power_save_status, debug, S_IRUSR);
1910 DEBUGFS_ADD_FILE(clear_ucode_statistics, debug, S_IWUSR); 2007 DEBUGFS_ADD_FILE(clear_ucode_statistics, debug, S_IWUSR);
1911 DEBUGFS_ADD_FILE(clear_traffic_statistics, debug, S_IWUSR); 2008 DEBUGFS_ADD_FILE(clear_traffic_statistics, debug, S_IWUSR);
2009 DEBUGFS_ADD_FILE(csr, debug, S_IWUSR);
1912 if ((priv->hw_rev & CSR_HW_REV_TYPE_MSK) != CSR_HW_REV_TYPE_3945) { 2010 if ((priv->hw_rev & CSR_HW_REV_TYPE_MSK) != CSR_HW_REV_TYPE_3945) {
1913 DEBUGFS_ADD_FILE(ucode_rx_stats, debug, S_IRUSR); 2011 DEBUGFS_ADD_FILE(ucode_rx_stats, debug, S_IRUSR);
1914 DEBUGFS_ADD_FILE(ucode_tx_stats, debug, S_IRUSR); 2012 DEBUGFS_ADD_FILE(ucode_tx_stats, debug, S_IRUSR);
1915 DEBUGFS_ADD_FILE(ucode_general_stats, debug, S_IRUSR); 2013 DEBUGFS_ADD_FILE(ucode_general_stats, debug, S_IRUSR);
1916 DEBUGFS_ADD_FILE(sensitivity, debug, S_IRUSR); 2014 DEBUGFS_ADD_FILE(sensitivity, debug, S_IRUSR);
1917 DEBUGFS_ADD_FILE(chain_noise, debug, S_IRUSR); 2015 DEBUGFS_ADD_FILE(chain_noise, debug, S_IRUSR);
2016 DEBUGFS_ADD_FILE(ucode_tracing, debug, S_IWUSR | S_IRUSR);
1918 } 2017 }
1919 DEBUGFS_ADD_BOOL(disable_sensitivity, rf, &priv->disable_sens_cal); 2018 DEBUGFS_ADD_BOOL(disable_sensitivity, rf, &priv->disable_sens_cal);
1920 DEBUGFS_ADD_BOOL(disable_chain_noise, rf, 2019 DEBUGFS_ADD_BOOL(disable_chain_noise, rf,
@@ -1966,6 +2065,7 @@ void iwl_dbgfs_unregister(struct iwl_priv *priv)
1966 file_clear_ucode_statistics); 2065 file_clear_ucode_statistics);
1967 DEBUGFS_REMOVE(priv->dbgfs->dbgfs_debug_files. 2066 DEBUGFS_REMOVE(priv->dbgfs->dbgfs_debug_files.
1968 file_clear_traffic_statistics); 2067 file_clear_traffic_statistics);
2068 DEBUGFS_REMOVE(priv->dbgfs->dbgfs_debug_files.file_csr);
1969 if ((priv->hw_rev & CSR_HW_REV_TYPE_MSK) != CSR_HW_REV_TYPE_3945) { 2069 if ((priv->hw_rev & CSR_HW_REV_TYPE_MSK) != CSR_HW_REV_TYPE_3945) {
1970 DEBUGFS_REMOVE(priv->dbgfs->dbgfs_debug_files. 2070 DEBUGFS_REMOVE(priv->dbgfs->dbgfs_debug_files.
1971 file_ucode_rx_stats); 2071 file_ucode_rx_stats);
@@ -1977,6 +2077,8 @@ void iwl_dbgfs_unregister(struct iwl_priv *priv)
1977 file_sensitivity); 2077 file_sensitivity);
1978 DEBUGFS_REMOVE(priv->dbgfs->dbgfs_debug_files. 2078 DEBUGFS_REMOVE(priv->dbgfs->dbgfs_debug_files.
1979 file_chain_noise); 2079 file_chain_noise);
2080 DEBUGFS_REMOVE(priv->dbgfs->dbgfs_debug_files.
2081 file_ucode_tracing);
1980 } 2082 }
1981 DEBUGFS_REMOVE(priv->dbgfs->dir_debug); 2083 DEBUGFS_REMOVE(priv->dbgfs->dir_debug);
1982 DEBUGFS_REMOVE(priv->dbgfs->dbgfs_rf_files.file_disable_sensitivity); 2084 DEBUGFS_REMOVE(priv->dbgfs->dbgfs_rf_files.file_disable_sensitivity);
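The new csr and ucode_tracing entries follow the standard debugfs idiom used throughout this file: a read handler formats driver state into a small kernel buffer and returns it with simple_read_from_buffer(), while a write handler copies a short, NUL-terminated string from user space and parses an integer out of it. A minimal, self-contained sketch of that idiom follows; the example_state structure and names are hypothetical and not part of the patch.

#include <linux/debugfs.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/uaccess.h>

struct example_state {
        bool tracing;                   /* hypothetical flag toggled from user space */
};

static int example_open(struct inode *inode, struct file *file)
{
        /* debugfs stores the pointer passed to debugfs_create_file() here */
        file->private_data = inode->i_private;
        return 0;
}

static ssize_t example_read(struct file *file, char __user *user_buf,
                            size_t count, loff_t *ppos)
{
        struct example_state *st = file->private_data;
        char buf[32];
        int pos;

        pos = scnprintf(buf, sizeof(buf), "tracing is %s\n",
                        st->tracing ? "On" : "Off");
        return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
}

static ssize_t example_write(struct file *file, const char __user *user_buf,
                             size_t count, loff_t *ppos)
{
        struct example_state *st = file->private_data;
        char buf[8] = {};
        int val;

        /* copy at most sizeof(buf) - 1 bytes so buf stays NUL-terminated */
        if (copy_from_user(buf, user_buf, min(count, sizeof(buf) - 1)))
                return -EFAULT;
        if (sscanf(buf, "%d", &val) != 1)
                return -EINVAL;

        st->tracing = val != 0;
        return count;
}

static const struct file_operations example_fops = {
        .open  = example_open,
        .read  = example_read,
        .write = example_write,
};

Registration is then a single debugfs_create_file("example", S_IWUSR | S_IRUSR, parent, st, &example_fops), which is essentially what the driver's DEBUGFS_ADD_FILE() wrapper boils down to.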
diff --git a/drivers/net/wireless/iwlwifi/iwl-dev.h b/drivers/net/wireless/iwlwifi/iwl-dev.h
index 3822cf53e36..70f0e79c8e4 100644
--- a/drivers/net/wireless/iwlwifi/iwl-dev.h
+++ b/drivers/net/wireless/iwlwifi/iwl-dev.h
@@ -512,6 +512,7 @@ struct iwl_ht_config {
512 bool is_ht; 512 bool is_ht;
513 bool is_40mhz; 513 bool is_40mhz;
514 bool single_chain_sufficient; 514 bool single_chain_sufficient;
515 enum ieee80211_smps_mode smps; /* current smps mode */
515 /* BSS related data */ 516 /* BSS related data */
516 u8 extension_chan_offset; 517 u8 extension_chan_offset;
517 u8 ht_protection; 518 u8 ht_protection;
@@ -984,6 +985,32 @@ struct iwl_switch_rxon {
984 __le16 channel; 985 __le16 channel;
985}; 986};
986 987
988/*
989 * schedule the timer to wake up every UCODE_TRACE_PERIOD milliseconds
990 * to perform continuous uCode event logging operation if enabled
991 */
992#define UCODE_TRACE_PERIOD (100)
993
994/*
995 * iwl_event_log: current uCode event log position
996 *
997 * @ucode_trace: enable/disable ucode continuous trace timer
998 * @num_wraps: how many times the event buffer wraps
999 * @next_entry: the entry just before the next one that uCode would fill
1000 * @non_wraps_count: counter for no wrap detected when dump ucode events
1001 * @wraps_once_count: counter for wrap once detected when dump ucode events
1002 * @wraps_more_count: counter for wrap more than once detected
1003 * when dump ucode events
1004 */
1005struct iwl_event_log {
1006 bool ucode_trace;
1007 u32 num_wraps;
1008 u32 next_entry;
1009 int non_wraps_count;
1010 int wraps_once_count;
1011 int wraps_more_count;
1012};
1013
987struct iwl_priv { 1014struct iwl_priv {
988 1015
989 /* ieee device used by generic ieee processing code */ 1016 /* ieee device used by generic ieee processing code */
@@ -1261,6 +1288,7 @@ struct iwl_priv {
1261 u32 disable_tx_power_cal; 1288 u32 disable_tx_power_cal;
1262 struct work_struct run_time_calib_work; 1289 struct work_struct run_time_calib_work;
1263 struct timer_list statistics_periodic; 1290 struct timer_list statistics_periodic;
1291 struct timer_list ucode_trace;
1264 bool hw_ready; 1292 bool hw_ready;
1265 /*For 3945*/ 1293 /*For 3945*/
1266#define IWL_DEFAULT_TX_POWER 0x0F 1294#define IWL_DEFAULT_TX_POWER 0x0F
@@ -1268,6 +1296,8 @@ struct iwl_priv {
1268 struct iwl3945_notif_statistics statistics_39; 1296 struct iwl3945_notif_statistics statistics_39;
1269 1297
1270 u32 sta_supp_rates; 1298 u32 sta_supp_rates;
1299
1300 struct iwl_event_log event_log;
1271}; /*iwl_priv */ 1301}; /*iwl_priv */
1272 1302
1273static inline void iwl_txq_ctx_activate(struct iwl_priv *priv, int txq_id) 1303static inline void iwl_txq_ctx_activate(struct iwl_priv *priv, int txq_id)
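The added ucode_trace member is a plain kernel timer: the debugfs write handler above arms it with mod_timer() and tears it down with del_timer_sync(), and since a timer_list fires only once, periodic polling every UCODE_TRACE_PERIOD milliseconds implies the callback re-arms itself. A sketch of that self-rearming pattern, using the setup_timer() interface of this kernel generation; the names and callback body are placeholders, not the driver's actual code.

#include <linux/jiffies.h>
#include <linux/timer.h>

#define EXAMPLE_PERIOD_MS       100     /* mirrors UCODE_TRACE_PERIOD */

static struct timer_list example_timer;

static void example_timer_fn(unsigned long data)
{
        /* ... read out newly logged uCode events here ... */

        /* re-arm so the poll repeats every EXAMPLE_PERIOD_MS */
        mod_timer(&example_timer,
                  jiffies + msecs_to_jiffies(EXAMPLE_PERIOD_MS));
}

static void example_trace_start(void)
{
        setup_timer(&example_timer, example_timer_fn, 0UL);
        mod_timer(&example_timer,
                  jiffies + msecs_to_jiffies(EXAMPLE_PERIOD_MS));
}

static void example_trace_stop(void)
{
        /* waits for a running callback to finish before returning */
        del_timer_sync(&example_timer);
}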
diff --git a/drivers/net/wireless/iwlwifi/iwl-devtrace.c b/drivers/net/wireless/iwlwifi/iwl-devtrace.c
index 83cc4e500a9..36580d8d8b8 100644
--- a/drivers/net/wireless/iwlwifi/iwl-devtrace.c
+++ b/drivers/net/wireless/iwlwifi/iwl-devtrace.c
@@ -37,4 +37,6 @@ EXPORT_TRACEPOINT_SYMBOL(iwlwifi_dev_iowrite32);
37EXPORT_TRACEPOINT_SYMBOL(iwlwifi_dev_rx); 37EXPORT_TRACEPOINT_SYMBOL(iwlwifi_dev_rx);
38EXPORT_TRACEPOINT_SYMBOL(iwlwifi_dev_ucode_event); 38EXPORT_TRACEPOINT_SYMBOL(iwlwifi_dev_ucode_event);
39EXPORT_TRACEPOINT_SYMBOL(iwlwifi_dev_ucode_error); 39EXPORT_TRACEPOINT_SYMBOL(iwlwifi_dev_ucode_error);
40EXPORT_TRACEPOINT_SYMBOL(iwlwifi_dev_ucode_cont_event);
41EXPORT_TRACEPOINT_SYMBOL(iwlwifi_dev_ucode_wrap_event);
40#endif 42#endif
diff --git a/drivers/net/wireless/iwlwifi/iwl-devtrace.h b/drivers/net/wireless/iwlwifi/iwl-devtrace.h
index d9c7363b1bb..ff4d012ce26 100644
--- a/drivers/net/wireless/iwlwifi/iwl-devtrace.h
+++ b/drivers/net/wireless/iwlwifi/iwl-devtrace.h
@@ -91,6 +91,50 @@ TRACE_EVENT(iwlwifi_dev_iowrite32,
91); 91);
92 92
93#undef TRACE_SYSTEM 93#undef TRACE_SYSTEM
94#define TRACE_SYSTEM iwlwifi_ucode
95
96TRACE_EVENT(iwlwifi_dev_ucode_cont_event,
97 TP_PROTO(struct iwl_priv *priv, u32 time, u32 data, u32 ev),
98 TP_ARGS(priv, time, data, ev),
99 TP_STRUCT__entry(
100 PRIV_ENTRY
101
102 __field(u32, time)
103 __field(u32, data)
104 __field(u32, ev)
105 ),
106 TP_fast_assign(
107 PRIV_ASSIGN;
108 __entry->time = time;
109 __entry->data = data;
110 __entry->ev = ev;
111 ),
112 TP_printk("[%p] EVT_LOGT:%010u:0x%08x:%04u",
113 __entry->priv, __entry->time, __entry->data, __entry->ev)
114);
115
116TRACE_EVENT(iwlwifi_dev_ucode_wrap_event,
117 TP_PROTO(struct iwl_priv *priv, u32 wraps, u32 n_entry, u32 p_entry),
118 TP_ARGS(priv, wraps, n_entry, p_entry),
119 TP_STRUCT__entry(
120 PRIV_ENTRY
121
122 __field(u32, wraps)
123 __field(u32, n_entry)
124 __field(u32, p_entry)
125 ),
126 TP_fast_assign(
127 PRIV_ASSIGN;
128 __entry->wraps = wraps;
129 __entry->n_entry = n_entry;
130 __entry->p_entry = p_entry;
131 ),
132 TP_printk("[%p] wraps=#%02d n=0x%X p=0x%X",
133 __entry->priv, __entry->wraps, __entry->n_entry,
134 __entry->p_entry)
135);
136
137#undef TRACE_SYSTEM
94#define TRACE_SYSTEM iwlwifi 138#define TRACE_SYSTEM iwlwifi
95 139
96TRACE_EVENT(iwlwifi_dev_hcmd, 140TRACE_EVENT(iwlwifi_dev_hcmd,
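TRACE_EVENT() only declares the two new tracepoints under the iwlwifi_ucode system; call sites in the driver use the generated trace_<event-name>() function, and the EXPORT_TRACEPOINT_SYMBOL() lines added to iwl-devtrace.c above make those symbols reachable from modular code. A hypothetical call site, shown purely for illustration:

#include "iwl-devtrace.h"       /* pulls in the generated trace_*() stubs */

static void example_log_cont_event(struct iwl_priv *priv,
                                   u32 time, u32 data, u32 ev)
{
        /* emits an iwlwifi_ucode/iwlwifi_dev_ucode_cont_event record when
         * the tracepoint is enabled and is nearly free otherwise */
        trace_iwlwifi_dev_ucode_cont_event(priv, time, data, ev);
}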
diff --git a/drivers/net/wireless/iwlwifi/iwl-hcmd.c b/drivers/net/wireless/iwlwifi/iwl-hcmd.c
index 30e9ea6d54e..87d684efe11 100644
--- a/drivers/net/wireless/iwlwifi/iwl-hcmd.c
+++ b/drivers/net/wireless/iwlwifi/iwl-hcmd.c
@@ -58,7 +58,6 @@ const char *get_cmd_string(u8 cmd)
58 IWL_CMD(COEX_PRIORITY_TABLE_CMD); 58 IWL_CMD(COEX_PRIORITY_TABLE_CMD);
59 IWL_CMD(COEX_MEDIUM_NOTIFICATION); 59 IWL_CMD(COEX_MEDIUM_NOTIFICATION);
60 IWL_CMD(COEX_EVENT_CMD); 60 IWL_CMD(COEX_EVENT_CMD);
61 IWL_CMD(RADAR_NOTIFICATION);
62 IWL_CMD(REPLY_QUIET_CMD); 61 IWL_CMD(REPLY_QUIET_CMD);
63 IWL_CMD(REPLY_CHANNEL_SWITCH); 62 IWL_CMD(REPLY_CHANNEL_SWITCH);
64 IWL_CMD(CHANNEL_SWITCH_NOTIFICATION); 63 IWL_CMD(CHANNEL_SWITCH_NOTIFICATION);
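get_cmd_string() is built from a case/stringify helper: each IWL_CMD(x) line expands to a case label returning the textual name of the command, so removing the stale RADAR_NOTIFICATION entry is the whole fix. A generic sketch of the idiom, with invented names:

#define EXAMPLE_CMD(x)  case x: return #x       /* stringify the enumerator */

enum example_cmd {
        EXAMPLE_REPLY_ALIVE     = 0x01,
        EXAMPLE_REPLY_ERROR     = 0x02,
};

static const char *example_cmd_string(int cmd)
{
        switch (cmd) {
        EXAMPLE_CMD(EXAMPLE_REPLY_ALIVE);
        EXAMPLE_CMD(EXAMPLE_REPLY_ERROR);
        default:
                return "UNKNOWN";
        }
}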
diff --git a/drivers/net/wireless/iwlwifi/iwl3945-base.c b/drivers/net/wireless/iwlwifi/iwl3945-base.c
index f8e4e4b18d0..10b0aa8024c 100644
--- a/drivers/net/wireless/iwlwifi/iwl3945-base.c
+++ b/drivers/net/wireless/iwlwifi/iwl3945-base.c
@@ -1518,8 +1518,9 @@ void iwl3945_dump_nic_error_log(struct iwl_priv *priv)
1518 * iwl3945_print_event_log - Dump error event log to syslog 1518 * iwl3945_print_event_log - Dump error event log to syslog
1519 * 1519 *
1520 */ 1520 */
1521static void iwl3945_print_event_log(struct iwl_priv *priv, u32 start_idx, 1521static int iwl3945_print_event_log(struct iwl_priv *priv, u32 start_idx,
1522 u32 num_events, u32 mode) 1522 u32 num_events, u32 mode,
1523 int pos, char **buf, size_t bufsz)
1523{ 1524{
1524 u32 i; 1525 u32 i;
1525 u32 base; /* SRAM byte address of event log header */ 1526 u32 base; /* SRAM byte address of event log header */
@@ -1529,7 +1530,7 @@ static void iwl3945_print_event_log(struct iwl_priv *priv, u32 start_idx,
1529 unsigned long reg_flags; 1530 unsigned long reg_flags;
1530 1531
1531 if (num_events == 0) 1532 if (num_events == 0)
1532 return; 1533 return pos;
1533 1534
1534 base = le32_to_cpu(priv->card_alive.log_event_table_ptr); 1535 base = le32_to_cpu(priv->card_alive.log_event_table_ptr);
1535 1536
@@ -1555,26 +1556,43 @@ static void iwl3945_print_event_log(struct iwl_priv *priv, u32 start_idx,
1555 time = _iwl_read_direct32(priv, HBUS_TARG_MEM_RDAT); 1556 time = _iwl_read_direct32(priv, HBUS_TARG_MEM_RDAT);
1556 if (mode == 0) { 1557 if (mode == 0) {
1557 /* data, ev */ 1558 /* data, ev */
1558 IWL_ERR(priv, "0x%08x\t%04u\n", time, ev); 1559 if (bufsz) {
1559 trace_iwlwifi_dev_ucode_event(priv, 0, time, ev); 1560 pos += scnprintf(*buf + pos, bufsz - pos,
1561 "0x%08x:%04u\n",
1562 time, ev);
1563 } else {
1564 IWL_ERR(priv, "0x%08x\t%04u\n", time, ev);
1565 trace_iwlwifi_dev_ucode_event(priv, 0,
1566 time, ev);
1567 }
1560 } else { 1568 } else {
1561 data = _iwl_read_direct32(priv, HBUS_TARG_MEM_RDAT); 1569 data = _iwl_read_direct32(priv, HBUS_TARG_MEM_RDAT);
1562 IWL_ERR(priv, "%010u\t0x%08x\t%04u\n", time, data, ev); 1570 if (bufsz) {
1563 trace_iwlwifi_dev_ucode_event(priv, time, data, ev); 1571 pos += scnprintf(*buf + pos, bufsz - pos,
1572 "%010u:0x%08x:%04u\n",
1573 time, data, ev);
1574 } else {
1575 IWL_ERR(priv, "%010u\t0x%08x\t%04u\n",
1576 time, data, ev);
1577 trace_iwlwifi_dev_ucode_event(priv, time,
1578 data, ev);
1579 }
1564 } 1580 }
1565 } 1581 }
1566 1582
1567 /* Allow device to power down */ 1583 /* Allow device to power down */
1568 iwl_release_nic_access(priv); 1584 iwl_release_nic_access(priv);
1569 spin_unlock_irqrestore(&priv->reg_lock, reg_flags); 1585 spin_unlock_irqrestore(&priv->reg_lock, reg_flags);
1586 return pos;
1570} 1587}
1571 1588
1572/** 1589/**
1573 * iwl3945_print_last_event_logs - Dump the newest # of event log to syslog 1590 * iwl3945_print_last_event_logs - Dump the newest # of event log to syslog
1574 */ 1591 */
1575static void iwl3945_print_last_event_logs(struct iwl_priv *priv, u32 capacity, 1592static int iwl3945_print_last_event_logs(struct iwl_priv *priv, u32 capacity,
1576 u32 num_wraps, u32 next_entry, 1593 u32 num_wraps, u32 next_entry,
1577 u32 size, u32 mode) 1594 u32 size, u32 mode,
1595 int pos, char **buf, size_t bufsz)
1578{ 1596{
1579 /* 1597 /*
1580 * display the newest DEFAULT_LOG_ENTRIES entries 1598 * display the newest DEFAULT_LOG_ENTRIES entries
@@ -1582,21 +1600,28 @@ static void iwl3945_print_last_event_logs(struct iwl_priv *priv, u32 capacity,
1582 */ 1600 */
1583 if (num_wraps) { 1601 if (num_wraps) {
1584 if (next_entry < size) { 1602 if (next_entry < size) {
1585 iwl3945_print_event_log(priv, 1603 pos = iwl3945_print_event_log(priv,
1586 capacity - (size - next_entry), 1604 capacity - (size - next_entry),
1587 size - next_entry, mode); 1605 size - next_entry, mode,
1588 iwl3945_print_event_log(priv, 0, 1606 pos, buf, bufsz);
1589 next_entry, mode); 1607 pos = iwl3945_print_event_log(priv, 0,
1608 next_entry, mode,
1609 pos, buf, bufsz);
1590 } else 1610 } else
1591 iwl3945_print_event_log(priv, next_entry - size, 1611 pos = iwl3945_print_event_log(priv, next_entry - size,
1592 size, mode); 1612 size, mode,
1613 pos, buf, bufsz);
1593 } else { 1614 } else {
1594 if (next_entry < size) 1615 if (next_entry < size)
1595 iwl3945_print_event_log(priv, 0, next_entry, mode); 1616 pos = iwl3945_print_event_log(priv, 0,
1617 next_entry, mode,
1618 pos, buf, bufsz);
1596 else 1619 else
1597 iwl3945_print_event_log(priv, next_entry - size, 1620 pos = iwl3945_print_event_log(priv, next_entry - size,
1598 size, mode); 1621 size, mode,
1622 pos, buf, bufsz);
1599 } 1623 }
1624 return pos;
1600} 1625}
1601 1626
1602/* For sanity check only. Actual size is determined by uCode, typ. 512 */ 1627/* For sanity check only. Actual size is determined by uCode, typ. 512 */
@@ -1604,7 +1629,8 @@ static void iwl3945_print_last_event_logs(struct iwl_priv *priv, u32 capacity,
1604 1629
1605#define DEFAULT_IWL3945_DUMP_EVENT_LOG_ENTRIES (20) 1630#define DEFAULT_IWL3945_DUMP_EVENT_LOG_ENTRIES (20)
1606 1631
1607void iwl3945_dump_nic_event_log(struct iwl_priv *priv, bool full_log) 1632int iwl3945_dump_nic_event_log(struct iwl_priv *priv, bool full_log,
1633 char **buf, bool display)
1608{ 1634{
1609 u32 base; /* SRAM byte address of event log header */ 1635 u32 base; /* SRAM byte address of event log header */
1610 u32 capacity; /* event log capacity in # entries */ 1636 u32 capacity; /* event log capacity in # entries */
@@ -1612,11 +1638,13 @@ void iwl3945_dump_nic_event_log(struct iwl_priv *priv, bool full_log)
1612 u32 num_wraps; /* # times uCode wrapped to top of log */ 1638 u32 num_wraps; /* # times uCode wrapped to top of log */
1613 u32 next_entry; /* index of next entry to be written by uCode */ 1639 u32 next_entry; /* index of next entry to be written by uCode */
1614 u32 size; /* # entries that we'll print */ 1640 u32 size; /* # entries that we'll print */
1641 int pos = 0;
1642 size_t bufsz = 0;
1615 1643
1616 base = le32_to_cpu(priv->card_alive.log_event_table_ptr); 1644 base = le32_to_cpu(priv->card_alive.log_event_table_ptr);
1617 if (!iwl3945_hw_valid_rtc_data_addr(base)) { 1645 if (!iwl3945_hw_valid_rtc_data_addr(base)) {
1618 IWL_ERR(priv, "Invalid event log pointer 0x%08X\n", base); 1646 IWL_ERR(priv, "Invalid event log pointer 0x%08X\n", base);
1619 return; 1647 return pos;
1620 } 1648 }
1621 1649
1622 /* event log header */ 1650 /* event log header */
@@ -1642,7 +1670,7 @@ void iwl3945_dump_nic_event_log(struct iwl_priv *priv, bool full_log)
1642 /* bail out if nothing in log */ 1670 /* bail out if nothing in log */
1643 if (size == 0) { 1671 if (size == 0) {
1644 IWL_ERR(priv, "Start IWL Event Log Dump: nothing in log\n"); 1672 IWL_ERR(priv, "Start IWL Event Log Dump: nothing in log\n");
1645 return; 1673 return pos;
1646 } 1674 }
1647 1675
1648#ifdef CONFIG_IWLWIFI_DEBUG 1676#ifdef CONFIG_IWLWIFI_DEBUG
@@ -1658,25 +1686,38 @@ void iwl3945_dump_nic_event_log(struct iwl_priv *priv, bool full_log)
1658 size); 1686 size);
1659 1687
1660#ifdef CONFIG_IWLWIFI_DEBUG 1688#ifdef CONFIG_IWLWIFI_DEBUG
1689 if (display) {
1690 if (full_log)
1691 bufsz = capacity * 48;
1692 else
1693 bufsz = size * 48;
1694 *buf = kmalloc(bufsz, GFP_KERNEL);
1695 if (!*buf)
1696 return pos;
1697 }
1661 if ((iwl_get_debug_level(priv) & IWL_DL_FW_ERRORS) || full_log) { 1698 if ((iwl_get_debug_level(priv) & IWL_DL_FW_ERRORS) || full_log) {
1662 /* if uCode has wrapped back to top of log, 1699 /* if uCode has wrapped back to top of log,
1663 * start at the oldest entry, 1700 * start at the oldest entry,
1664 * i.e the next one that uCode would fill. 1701 * i.e the next one that uCode would fill.
1665 */ 1702 */
1666 if (num_wraps) 1703 if (num_wraps)
1667 iwl3945_print_event_log(priv, next_entry, 1704 pos = iwl3945_print_event_log(priv, next_entry,
1668 capacity - next_entry, mode); 1705 capacity - next_entry, mode,
1706 pos, buf, bufsz);
1669 1707
1670 /* (then/else) start at top of log */ 1708 /* (then/else) start at top of log */
1671 iwl3945_print_event_log(priv, 0, next_entry, mode); 1709 pos = iwl3945_print_event_log(priv, 0, next_entry, mode,
1710 pos, buf, bufsz);
1672 } else 1711 } else
1673 iwl3945_print_last_event_logs(priv, capacity, num_wraps, 1712 pos = iwl3945_print_last_event_logs(priv, capacity, num_wraps,
1674 next_entry, size, mode); 1713 next_entry, size, mode,
1714 pos, buf, bufsz);
1675#else 1715#else
1676 iwl3945_print_last_event_logs(priv, capacity, num_wraps, 1716 pos = iwl3945_print_last_event_logs(priv, capacity, num_wraps,
1677 next_entry, size, mode); 1717 next_entry, size, mode,
1718 pos, buf, bufsz);
1678#endif 1719#endif
1679 1720 return pos;
1680} 1721}
1681 1722
1682static void iwl3945_irq_tasklet(struct iwl_priv *priv) 1723static void iwl3945_irq_tasklet(struct iwl_priv *priv)
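The reworked 3945 dump threads pos/buf/bufsz through the helpers so the same code path can either accumulate into a kmalloc()'d buffer handed back to debugfs (bufsz != 0, matching the log_event file becoming readable above) or keep printing to syslog (bufsz == 0). The accumulation relies on scnprintf() returning the number of bytes actually stored, so pos can never run past the buffer. A small stand-alone sketch of that idiom:

#include <linux/kernel.h>

static int example_format_events(char *buf, size_t bufsz)
{
        int pos = 0;

        /* each call appends at buf + pos; once the buffer is full, the
         * remaining space is 1 (the NUL) and further calls append nothing
         * instead of overflowing */
        pos += scnprintf(buf + pos, bufsz - pos, "Start Event Log Dump\n");
        pos += scnprintf(buf + pos, bufsz - pos, "%010u:0x%08x:%04u\n",
                         123456789u, 0xdeadbeefu, 42u);

        return pos;             /* number of valid bytes in buf */
}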
diff --git a/drivers/net/wireless/iwmc3200wifi/iwm.h b/drivers/net/wireless/iwmc3200wifi/iwm.h
index 842811142be..79ffa3b98d7 100644
--- a/drivers/net/wireless/iwmc3200wifi/iwm.h
+++ b/drivers/net/wireless/iwmc3200wifi/iwm.h
@@ -268,7 +268,7 @@ struct iwm_priv {
268 268
269 struct sk_buff_head rx_list; 269 struct sk_buff_head rx_list;
270 struct list_head rx_tickets; 270 struct list_head rx_tickets;
271 struct list_head rx_packets[IWM_RX_ID_HASH + 1]; 271 struct list_head rx_packets[IWM_RX_ID_HASH];
272 struct workqueue_struct *rx_wq; 272 struct workqueue_struct *rx_wq;
273 struct work_struct rx_worker; 273 struct work_struct rx_worker;
274 274
diff --git a/drivers/net/wireless/iwmc3200wifi/rx.c b/drivers/net/wireless/iwmc3200wifi/rx.c
index 6d6ed748517..d32adeab68a 100644
--- a/drivers/net/wireless/iwmc3200wifi/rx.c
+++ b/drivers/net/wireless/iwmc3200wifi/rx.c
@@ -868,36 +868,35 @@ static int iwm_mlme_mgt_frame(struct iwm_priv *iwm, u8 *buf,
868 struct iwm_umac_notif_mgt_frame *mgt_frame = 868 struct iwm_umac_notif_mgt_frame *mgt_frame =
869 (struct iwm_umac_notif_mgt_frame *)buf; 869 (struct iwm_umac_notif_mgt_frame *)buf;
870 struct ieee80211_mgmt *mgt = (struct ieee80211_mgmt *)mgt_frame->frame; 870 struct ieee80211_mgmt *mgt = (struct ieee80211_mgmt *)mgt_frame->frame;
871 u8 *ie;
872 871
873 IWM_HEXDUMP(iwm, DBG, MLME, "MGT: ", mgt_frame->frame, 872 IWM_HEXDUMP(iwm, DBG, MLME, "MGT: ", mgt_frame->frame,
874 le16_to_cpu(mgt_frame->len)); 873 le16_to_cpu(mgt_frame->len));
875 874
876 if (ieee80211_is_assoc_req(mgt->frame_control)) { 875 if (ieee80211_is_assoc_req(mgt->frame_control)) {
877 ie = mgt->u.assoc_req.variable;; 876 iwm->req_ie_len = le16_to_cpu(mgt_frame->len)
878 iwm->req_ie_len = 877 - offsetof(struct ieee80211_mgmt,
879 le16_to_cpu(mgt_frame->len) - (ie - (u8 *)mgt); 878 u.assoc_req.variable);
880 kfree(iwm->req_ie); 879 kfree(iwm->req_ie);
881 iwm->req_ie = kmemdup(mgt->u.assoc_req.variable, 880 iwm->req_ie = kmemdup(mgt->u.assoc_req.variable,
882 iwm->req_ie_len, GFP_KERNEL); 881 iwm->req_ie_len, GFP_KERNEL);
883 } else if (ieee80211_is_reassoc_req(mgt->frame_control)) { 882 } else if (ieee80211_is_reassoc_req(mgt->frame_control)) {
884 ie = mgt->u.reassoc_req.variable;; 883 iwm->req_ie_len = le16_to_cpu(mgt_frame->len)
885 iwm->req_ie_len = 884 - offsetof(struct ieee80211_mgmt,
886 le16_to_cpu(mgt_frame->len) - (ie - (u8 *)mgt); 885 u.reassoc_req.variable);
887 kfree(iwm->req_ie); 886 kfree(iwm->req_ie);
888 iwm->req_ie = kmemdup(mgt->u.reassoc_req.variable, 887 iwm->req_ie = kmemdup(mgt->u.reassoc_req.variable,
889 iwm->req_ie_len, GFP_KERNEL); 888 iwm->req_ie_len, GFP_KERNEL);
890 } else if (ieee80211_is_assoc_resp(mgt->frame_control)) { 889 } else if (ieee80211_is_assoc_resp(mgt->frame_control)) {
891 ie = mgt->u.assoc_resp.variable;; 890 iwm->resp_ie_len = le16_to_cpu(mgt_frame->len)
892 iwm->resp_ie_len = 891 - offsetof(struct ieee80211_mgmt,
893 le16_to_cpu(mgt_frame->len) - (ie - (u8 *)mgt); 892 u.assoc_resp.variable);
894 kfree(iwm->resp_ie); 893 kfree(iwm->resp_ie);
895 iwm->resp_ie = kmemdup(mgt->u.assoc_resp.variable, 894 iwm->resp_ie = kmemdup(mgt->u.assoc_resp.variable,
896 iwm->resp_ie_len, GFP_KERNEL); 895 iwm->resp_ie_len, GFP_KERNEL);
897 } else if (ieee80211_is_reassoc_resp(mgt->frame_control)) { 896 } else if (ieee80211_is_reassoc_resp(mgt->frame_control)) {
898 ie = mgt->u.reassoc_resp.variable;; 897 iwm->resp_ie_len = le16_to_cpu(mgt_frame->len)
899 iwm->resp_ie_len = 898 - offsetof(struct ieee80211_mgmt,
900 le16_to_cpu(mgt_frame->len) - (ie - (u8 *)mgt); 899 u.reassoc_resp.variable);
901 kfree(iwm->resp_ie); 900 kfree(iwm->resp_ie);
902 iwm->resp_ie = kmemdup(mgt->u.reassoc_resp.variable, 901 iwm->resp_ie = kmemdup(mgt->u.reassoc_resp.variable,
903 iwm->resp_ie_len, GFP_KERNEL); 902 iwm->resp_ie_len, GFP_KERNEL);
@@ -1534,6 +1533,33 @@ static void classify8023(struct sk_buff *skb)
1534 } 1533 }
1535} 1534}
1536 1535
1536static void iwm_rx_process_amsdu(struct iwm_priv *iwm, struct sk_buff *skb)
1537{
1538 struct wireless_dev *wdev = iwm_to_wdev(iwm);
1539 struct net_device *ndev = iwm_to_ndev(iwm);
1540 struct sk_buff_head list;
1541 struct sk_buff *frame;
1542
1543 IWM_HEXDUMP(iwm, DBG, RX, "A-MSDU: ", skb->data, skb->len);
1544
1545 __skb_queue_head_init(&list);
1546 ieee80211_amsdu_to_8023s(skb, &list, ndev->dev_addr, wdev->iftype, 0);
1547
1548 while ((frame = __skb_dequeue(&list))) {
1549 ndev->stats.rx_packets++;
1550 ndev->stats.rx_bytes += frame->len;
1551
1552 frame->protocol = eth_type_trans(frame, ndev);
1553 frame->ip_summed = CHECKSUM_NONE;
1554 memset(frame->cb, 0, sizeof(frame->cb));
1555
1556 if (netif_rx_ni(frame) == NET_RX_DROP) {
1557 IWM_ERR(iwm, "Packet dropped\n");
1558 ndev->stats.rx_dropped++;
1559 }
1560 }
1561}
1562
1537static void iwm_rx_process_packet(struct iwm_priv *iwm, 1563static void iwm_rx_process_packet(struct iwm_priv *iwm,
1538 struct iwm_rx_packet *packet, 1564 struct iwm_rx_packet *packet,
1539 struct iwm_rx_ticket_node *ticket_node) 1565 struct iwm_rx_ticket_node *ticket_node)
@@ -1548,25 +1574,34 @@ static void iwm_rx_process_packet(struct iwm_priv *iwm,
1548 switch (le16_to_cpu(ticket_node->ticket->action)) { 1574 switch (le16_to_cpu(ticket_node->ticket->action)) {
1549 case IWM_RX_TICKET_RELEASE: 1575 case IWM_RX_TICKET_RELEASE:
1550 IWM_DBG_RX(iwm, DBG, "RELEASE packet\n"); 1576 IWM_DBG_RX(iwm, DBG, "RELEASE packet\n");
1551 classify8023(skb); 1577
1552 iwm_rx_adjust_packet(iwm, packet, ticket_node); 1578 iwm_rx_adjust_packet(iwm, packet, ticket_node);
1579 skb->dev = iwm_to_ndev(iwm);
1580 classify8023(skb);
1581
1582 if (le16_to_cpu(ticket_node->ticket->flags) &
1583 IWM_RX_TICKET_AMSDU_MSK) {
1584 iwm_rx_process_amsdu(iwm, skb);
1585 break;
1586 }
1587
1553 ret = ieee80211_data_to_8023(skb, ndev->dev_addr, wdev->iftype); 1588 ret = ieee80211_data_to_8023(skb, ndev->dev_addr, wdev->iftype);
1554 if (ret < 0) { 1589 if (ret < 0) {
1555 IWM_DBG_RX(iwm, DBG, "Couldn't convert 802.11 header - " 1590 IWM_DBG_RX(iwm, DBG, "Couldn't convert 802.11 header - "
1556 "%d\n", ret); 1591 "%d\n", ret);
1592 kfree_skb(packet->skb);
1557 break; 1593 break;
1558 } 1594 }
1559 1595
1560 IWM_HEXDUMP(iwm, DBG, RX, "802.3: ", skb->data, skb->len); 1596 IWM_HEXDUMP(iwm, DBG, RX, "802.3: ", skb->data, skb->len);
1561 1597
1562 skb->dev = iwm_to_ndev(iwm); 1598 ndev->stats.rx_packets++;
1599 ndev->stats.rx_bytes += skb->len;
1600
1563 skb->protocol = eth_type_trans(skb, ndev); 1601 skb->protocol = eth_type_trans(skb, ndev);
1564 skb->ip_summed = CHECKSUM_NONE; 1602 skb->ip_summed = CHECKSUM_NONE;
1565 memset(skb->cb, 0, sizeof(skb->cb)); 1603 memset(skb->cb, 0, sizeof(skb->cb));
1566 1604
1567 ndev->stats.rx_packets++;
1568 ndev->stats.rx_bytes += skb->len;
1569
1570 if (netif_rx_ni(skb) == NET_RX_DROP) { 1605 if (netif_rx_ni(skb) == NET_RX_DROP) {
1571 IWM_ERR(iwm, "Packet dropped\n"); 1606 IWM_ERR(iwm, "Packet dropped\n");
1572 ndev->stats.rx_dropped++; 1607 ndev->stats.rx_dropped++;
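The IE bookkeeping above replaces pointer arithmetic on a local ie cursor (note the stray double semicolons in the removed lines) with offsetof() on struct ieee80211_mgmt, which states the same length directly: everything after the fixed fields of the (re)assoc frame is information elements. As a stand-alone sketch:

#include <linux/ieee80211.h>
#include <linux/stddef.h>

static size_t example_assoc_req_ie_len(size_t frame_len)
{
        /* IEs start right after the fixed assoc_req fields, so their length
         * is the total frame length minus the offset of variable[] */
        return frame_len - offsetof(struct ieee80211_mgmt,
                                    u.assoc_req.variable);
}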
diff --git a/drivers/net/wireless/libertas/Kconfig b/drivers/net/wireless/libertas/Kconfig
index 30aa9d48d67..0485c995757 100644
--- a/drivers/net/wireless/libertas/Kconfig
+++ b/drivers/net/wireless/libertas/Kconfig
@@ -37,3 +37,9 @@ config LIBERTAS_DEBUG
37 depends on LIBERTAS 37 depends on LIBERTAS
38 ---help--- 38 ---help---
39 Debugging support. 39 Debugging support.
40
41config LIBERTAS_MESH
42 bool "Enable mesh support"
43 depends on LIBERTAS
44 help
45 This enables Libertas' MESH support, used by e.g. the OLPC people.
diff --git a/drivers/net/wireless/libertas/Makefile b/drivers/net/wireless/libertas/Makefile
index b188cd97a05..45e870e3311 100644
--- a/drivers/net/wireless/libertas/Makefile
+++ b/drivers/net/wireless/libertas/Makefile
@@ -5,11 +5,11 @@ libertas-y += cmdresp.o
5libertas-y += debugfs.o 5libertas-y += debugfs.o
6libertas-y += ethtool.o 6libertas-y += ethtool.o
7libertas-y += main.o 7libertas-y += main.o
8libertas-y += mesh.o
9libertas-y += rx.o 8libertas-y += rx.o
10libertas-y += scan.o 9libertas-y += scan.o
11libertas-y += tx.o 10libertas-y += tx.o
12libertas-y += wext.o 11libertas-y += wext.o
12libertas-$(CONFIG_LIBERTAS_MESH) += mesh.o
13 13
14usb8xxx-objs += if_usb.o 14usb8xxx-objs += if_usb.o
15libertas_cs-objs += if_cs.o 15libertas_cs-objs += if_cs.o
diff --git a/drivers/net/wireless/libertas/assoc.c b/drivers/net/wireless/libertas/assoc.c
index 751067369ba..5e650f35841 100644
--- a/drivers/net/wireless/libertas/assoc.c
+++ b/drivers/net/wireless/libertas/assoc.c
@@ -390,10 +390,8 @@ int lbs_cmd_802_11_rate_adapt_rateset(struct lbs_private *priv,
390 cmd.enablehwauto = cpu_to_le16(priv->enablehwauto); 390 cmd.enablehwauto = cpu_to_le16(priv->enablehwauto);
391 cmd.bitmap = lbs_rate_to_fw_bitmap(priv->cur_rate, priv->enablehwauto); 391 cmd.bitmap = lbs_rate_to_fw_bitmap(priv->cur_rate, priv->enablehwauto);
392 ret = lbs_cmd_with_response(priv, CMD_802_11_RATE_ADAPT_RATESET, &cmd); 392 ret = lbs_cmd_with_response(priv, CMD_802_11_RATE_ADAPT_RATESET, &cmd);
393 if (!ret && cmd_action == CMD_ACT_GET) { 393 if (!ret && cmd_action == CMD_ACT_GET)
394 priv->ratebitmap = le16_to_cpu(cmd.bitmap);
395 priv->enablehwauto = le16_to_cpu(cmd.enablehwauto); 394 priv->enablehwauto = le16_to_cpu(cmd.enablehwauto);
396 }
397 395
398 lbs_deb_leave_args(LBS_DEB_CMD, "ret %d", ret); 396 lbs_deb_leave_args(LBS_DEB_CMD, "ret %d", ret);
399 return ret; 397 return ret;
@@ -807,8 +805,7 @@ static int lbs_try_associate(struct lbs_private *priv,
807 } 805 }
808 806
809 /* Use short preamble only when both the BSS and firmware support it */ 807 /* Use short preamble only when both the BSS and firmware support it */
810 if ((priv->capability & WLAN_CAPABILITY_SHORT_PREAMBLE) && 808 if (assoc_req->bss.capability & WLAN_CAPABILITY_SHORT_PREAMBLE)
811 (assoc_req->bss.capability & WLAN_CAPABILITY_SHORT_PREAMBLE))
812 preamble = RADIO_PREAMBLE_SHORT; 809 preamble = RADIO_PREAMBLE_SHORT;
813 810
814 ret = lbs_set_radio(priv, preamble, 1); 811 ret = lbs_set_radio(priv, preamble, 1);
@@ -939,8 +936,7 @@ static int lbs_adhoc_join(struct lbs_private *priv,
939 } 936 }
940 937
941 /* Use short preamble only when both the BSS and firmware support it */ 938 /* Use short preamble only when both the BSS and firmware support it */
942 if ((priv->capability & WLAN_CAPABILITY_SHORT_PREAMBLE) && 939 if (bss->capability & WLAN_CAPABILITY_SHORT_PREAMBLE) {
943 (bss->capability & WLAN_CAPABILITY_SHORT_PREAMBLE)) {
944 lbs_deb_join("AdhocJoin: Short preamble\n"); 940 lbs_deb_join("AdhocJoin: Short preamble\n");
945 preamble = RADIO_PREAMBLE_SHORT; 941 preamble = RADIO_PREAMBLE_SHORT;
946 } 942 }
@@ -1049,7 +1045,7 @@ static int lbs_adhoc_start(struct lbs_private *priv,
1049 struct assoc_request *assoc_req) 1045 struct assoc_request *assoc_req)
1050{ 1046{
1051 struct cmd_ds_802_11_ad_hoc_start cmd; 1047 struct cmd_ds_802_11_ad_hoc_start cmd;
1052 u8 preamble = RADIO_PREAMBLE_LONG; 1048 u8 preamble = RADIO_PREAMBLE_SHORT;
1053 size_t ratesize = 0; 1049 size_t ratesize = 0;
1054 u16 tmpcap = 0; 1050 u16 tmpcap = 0;
1055 int ret = 0; 1051 int ret = 0;
@@ -1057,11 +1053,6 @@ static int lbs_adhoc_start(struct lbs_private *priv,
1057 1053
1058 lbs_deb_enter(LBS_DEB_ASSOC); 1054 lbs_deb_enter(LBS_DEB_ASSOC);
1059 1055
1060 if (priv->capability & WLAN_CAPABILITY_SHORT_PREAMBLE) {
1061 lbs_deb_join("ADHOC_START: Will use short preamble\n");
1062 preamble = RADIO_PREAMBLE_SHORT;
1063 }
1064
1065 ret = lbs_set_radio(priv, preamble, 1); 1056 ret = lbs_set_radio(priv, preamble, 1);
1066 if (ret) 1057 if (ret)
1067 goto out; 1058 goto out;
diff --git a/drivers/net/wireless/libertas/cmd.c b/drivers/net/wireless/libertas/cmd.c
index 42611bea76a..82371ef3952 100644
--- a/drivers/net/wireless/libertas/cmd.c
+++ b/drivers/net/wireless/libertas/cmd.c
@@ -143,19 +143,6 @@ int lbs_update_hw_spec(struct lbs_private *priv)
143 lbs_deb_cmd("GET_HW_SPEC: hardware interface 0x%x, hardware spec 0x%04x\n", 143 lbs_deb_cmd("GET_HW_SPEC: hardware interface 0x%x, hardware spec 0x%04x\n",
144 cmd.hwifversion, cmd.version); 144 cmd.hwifversion, cmd.version);
145 145
146 /* Determine mesh_fw_ver from fwrelease and fwcapinfo */
147 /* 5.0.16p0 9.0.0.p0 is known to NOT support any mesh */
148 /* 5.110.22 have mesh command with 0xa3 command id */
149 /* 10.0.0.p0 FW brings in mesh config command with different id */
150 /* Check FW version MSB and initialize mesh_fw_ver */
151 if (MRVL_FW_MAJOR_REV(priv->fwrelease) == MRVL_FW_V5)
152 priv->mesh_fw_ver = MESH_FW_OLD;
153 else if ((MRVL_FW_MAJOR_REV(priv->fwrelease) >= MRVL_FW_V10) &&
154 (priv->fwcapinfo & MESH_CAPINFO_ENABLE_MASK))
155 priv->mesh_fw_ver = MESH_FW_NEW;
156 else
157 priv->mesh_fw_ver = MESH_NONE;
158
159 /* Clamp region code to 8-bit since FW spec indicates that it should 146 /* Clamp region code to 8-bit since FW spec indicates that it should
160 * only ever be 8-bit, even though the field size is 16-bit. Some firmware 147 * only ever be 8-bit, even though the field size is 16-bit. Some firmware
161 * returns non-zero high 8 bits here. 148 * returns non-zero high 8 bits here.
@@ -855,9 +842,6 @@ int lbs_set_radio(struct lbs_private *priv, u8 preamble, u8 radio_on)
855 if (priv->fwrelease < 0x09000000) { 842 if (priv->fwrelease < 0x09000000) {
856 switch (preamble) { 843 switch (preamble) {
857 case RADIO_PREAMBLE_SHORT: 844 case RADIO_PREAMBLE_SHORT:
858 if (!(priv->capability & WLAN_CAPABILITY_SHORT_PREAMBLE))
859 goto out;
860 /* Fall through */
861 case RADIO_PREAMBLE_AUTO: 845 case RADIO_PREAMBLE_AUTO:
862 case RADIO_PREAMBLE_LONG: 846 case RADIO_PREAMBLE_LONG:
863 cmd.control = cpu_to_le16(preamble); 847 cmd.control = cpu_to_le16(preamble);
@@ -1011,6 +995,8 @@ int lbs_prepare_and_send_command(struct lbs_private *priv,
1011 ret = 0; 995 ret = 0;
1012 break; 996 break;
1013 997
998#ifdef CONFIG_LIBERTAS_MESH
999
1014 case CMD_BT_ACCESS: 1000 case CMD_BT_ACCESS:
1015 ret = lbs_cmd_bt_access(cmdptr, cmd_action, pdata_buf); 1001 ret = lbs_cmd_bt_access(cmdptr, cmd_action, pdata_buf);
1016 break; 1002 break;
@@ -1019,6 +1005,8 @@ int lbs_prepare_and_send_command(struct lbs_private *priv,
1019 ret = lbs_cmd_fwt_access(cmdptr, cmd_action, pdata_buf); 1005 ret = lbs_cmd_fwt_access(cmdptr, cmd_action, pdata_buf);
1020 break; 1006 break;
1021 1007
1008#endif
1009
1022 case CMD_802_11_BEACON_CTRL: 1010 case CMD_802_11_BEACON_CTRL:
1023 ret = lbs_cmd_bcn_ctrl(priv, cmdptr, cmd_action); 1011 ret = lbs_cmd_bcn_ctrl(priv, cmdptr, cmd_action);
1024 break; 1012 break;
@@ -1317,7 +1305,7 @@ int lbs_execute_next_command(struct lbs_private *priv)
1317 if ((priv->psmode != LBS802_11POWERMODECAM) && 1305 if ((priv->psmode != LBS802_11POWERMODECAM) &&
1318 (priv->psstate == PS_STATE_FULL_POWER) && 1306 (priv->psstate == PS_STATE_FULL_POWER) &&
1319 ((priv->connect_status == LBS_CONNECTED) || 1307 ((priv->connect_status == LBS_CONNECTED) ||
1320 (priv->mesh_connect_status == LBS_CONNECTED))) { 1308 lbs_mesh_connected(priv))) {
1321 if (priv->secinfo.WPAenabled || 1309 if (priv->secinfo.WPAenabled ||
1322 priv->secinfo.WPA2enabled) { 1310 priv->secinfo.WPA2enabled) {
1323 /* check for valid WPA group keys */ 1311 /* check for valid WPA group keys */
diff --git a/drivers/net/wireless/libertas/cmd.h b/drivers/net/wireless/libertas/cmd.h
index 2862748aef7..cb4138a55fd 100644
--- a/drivers/net/wireless/libertas/cmd.h
+++ b/drivers/net/wireless/libertas/cmd.h
@@ -110,18 +110,6 @@ int lbs_set_snmp_mib(struct lbs_private *priv, u32 oid, u16 val);
110int lbs_get_snmp_mib(struct lbs_private *priv, u32 oid, u16 *out_val); 110int lbs_get_snmp_mib(struct lbs_private *priv, u32 oid, u16 *out_val);
111 111
112 112
113/* Mesh related */
114
115int lbs_mesh_access(struct lbs_private *priv, uint16_t cmd_action,
116 struct cmd_ds_mesh_access *cmd);
117
118int lbs_mesh_config_send(struct lbs_private *priv,
119 struct cmd_ds_mesh_config *cmd,
120 uint16_t action, uint16_t type);
121
122int lbs_mesh_config(struct lbs_private *priv, uint16_t enable, uint16_t chan);
123
124
125/* Commands only used in wext.c, assoc. and scan.c */ 113/* Commands only used in wext.c, assoc. and scan.c */
126 114
127int lbs_set_power_adapt_cfg(struct lbs_private *priv, int enable, int8_t p0, 115int lbs_set_power_adapt_cfg(struct lbs_private *priv, int enable, int8_t p0,
diff --git a/drivers/net/wireless/libertas/cmdresp.c b/drivers/net/wireless/libertas/cmdresp.c
index 21d57690c20..0334a58820e 100644
--- a/drivers/net/wireless/libertas/cmdresp.c
+++ b/drivers/net/wireless/libertas/cmdresp.c
@@ -485,20 +485,8 @@ int lbs_process_event(struct lbs_private *priv, u32 event)
485 break; 485 break;
486 486
487 case MACREG_INT_CODE_MESH_AUTO_STARTED: 487 case MACREG_INT_CODE_MESH_AUTO_STARTED:
488 /* Ignore spurious autostart events if autostart is disabled */ 488 /* Ignore spurious autostart events */
489 if (!priv->mesh_autostart_enabled) { 489 lbs_pr_info("EVENT: MESH_AUTO_STARTED (ignoring)\n");
490 lbs_pr_info("EVENT: MESH_AUTO_STARTED (ignoring)\n");
491 break;
492 }
493 lbs_pr_info("EVENT: MESH_AUTO_STARTED\n");
494 priv->mesh_connect_status = LBS_CONNECTED;
495 if (priv->mesh_open) {
496 netif_carrier_on(priv->mesh_dev);
497 if (!priv->tx_pending_len)
498 netif_wake_queue(priv->mesh_dev);
499 }
500 priv->mode = IW_MODE_ADHOC;
501 schedule_work(&priv->sync_channel);
502 break; 490 break;
503 491
504 default: 492 default:
diff --git a/drivers/net/wireless/libertas/defs.h b/drivers/net/wireless/libertas/defs.h
index 6b6ea9f7bf5..ea3f10ef4e0 100644
--- a/drivers/net/wireless/libertas/defs.h
+++ b/drivers/net/wireless/libertas/defs.h
@@ -397,13 +397,6 @@ enum KEY_INFO_WPA {
397 KEY_INFO_WPA_ENABLED = 0x04 397 KEY_INFO_WPA_ENABLED = 0x04
398}; 398};
399 399
400/** mesh_fw_ver */
401enum _mesh_fw_ver {
402 MESH_NONE = 0, /* MESH is not supported */
403 MESH_FW_OLD, /* MESH is supported in FW V5 */
404 MESH_FW_NEW, /* MESH is supported in FW V10 and newer */
405};
406
407/* Default values for fwt commands. */ 400/* Default values for fwt commands. */
408#define FWT_DEFAULT_METRIC 0 401#define FWT_DEFAULT_METRIC 0
409#define FWT_DEFAULT_DIR 1 402#define FWT_DEFAULT_DIR 1
diff --git a/drivers/net/wireless/libertas/dev.h b/drivers/net/wireless/libertas/dev.h
index 05bb298dfae..c348aff8f30 100644
--- a/drivers/net/wireless/libertas/dev.h
+++ b/drivers/net/wireless/libertas/dev.h
@@ -39,15 +39,14 @@ struct lbs_private {
39 39
40 /* Mesh */ 40 /* Mesh */
41 struct net_device *mesh_dev; /* Virtual device */ 41 struct net_device *mesh_dev; /* Virtual device */
42#ifdef CONFIG_LIBERTAS_MESH
42 u32 mesh_connect_status; 43 u32 mesh_connect_status;
43 struct lbs_mesh_stats mstats; 44 struct lbs_mesh_stats mstats;
44 int mesh_open; 45 int mesh_open;
45 int mesh_fw_ver;
46 int mesh_autostart_enabled;
47 uint16_t mesh_tlv; 46 uint16_t mesh_tlv;
48 u8 mesh_ssid[IEEE80211_MAX_SSID_LEN + 1]; 47 u8 mesh_ssid[IEEE80211_MAX_SSID_LEN + 1];
49 u8 mesh_ssid_len; 48 u8 mesh_ssid_len;
50 struct work_struct sync_channel; 49#endif
51 50
52 /* Monitor mode */ 51 /* Monitor mode */
53 struct net_device *rtap_net_dev; 52 struct net_device *rtap_net_dev;
@@ -176,9 +175,7 @@ struct lbs_private {
176 struct bss_descriptor *networks; 175 struct bss_descriptor *networks;
177 struct assoc_request * pending_assoc_req; 176 struct assoc_request * pending_assoc_req;
178 struct assoc_request * in_progress_assoc_req; 177 struct assoc_request * in_progress_assoc_req;
179 u16 capability;
180 uint16_t enablehwauto; 178 uint16_t enablehwauto;
181 uint16_t ratebitmap;
182 179
183 /* ADHOC */ 180 /* ADHOC */
184 u16 beacon_period; 181 u16 beacon_period;
diff --git a/drivers/net/wireless/libertas/ethtool.c b/drivers/net/wireless/libertas/ethtool.c
index 63d020374c2..3804a58d7f4 100644
--- a/drivers/net/wireless/libertas/ethtool.c
+++ b/drivers/net/wireless/libertas/ethtool.c
@@ -114,9 +114,11 @@ const struct ethtool_ops lbs_ethtool_ops = {
114 .get_drvinfo = lbs_ethtool_get_drvinfo, 114 .get_drvinfo = lbs_ethtool_get_drvinfo,
115 .get_eeprom = lbs_ethtool_get_eeprom, 115 .get_eeprom = lbs_ethtool_get_eeprom,
116 .get_eeprom_len = lbs_ethtool_get_eeprom_len, 116 .get_eeprom_len = lbs_ethtool_get_eeprom_len,
117#ifdef CONFIG_LIBERTAS_MESH
117 .get_sset_count = lbs_mesh_ethtool_get_sset_count, 118 .get_sset_count = lbs_mesh_ethtool_get_sset_count,
118 .get_ethtool_stats = lbs_mesh_ethtool_get_stats, 119 .get_ethtool_stats = lbs_mesh_ethtool_get_stats,
119 .get_strings = lbs_mesh_ethtool_get_strings, 120 .get_strings = lbs_mesh_ethtool_get_strings,
121#endif
120 .get_wol = lbs_ethtool_get_wol, 122 .get_wol = lbs_ethtool_get_wol,
121 .set_wol = lbs_ethtool_set_wol, 123 .set_wol = lbs_ethtool_set_wol,
122}; 124};
diff --git a/drivers/net/wireless/libertas/main.c b/drivers/net/wireless/libertas/main.c
index c2975c8e2f2..60bde1233a3 100644
--- a/drivers/net/wireless/libertas/main.c
+++ b/drivers/net/wireless/libertas/main.c
@@ -123,7 +123,7 @@ static ssize_t lbs_rtap_set(struct device *dev,
123 if (priv->monitormode == monitor_mode) 123 if (priv->monitormode == monitor_mode)
124 return strlen(buf); 124 return strlen(buf);
125 if (!priv->monitormode) { 125 if (!priv->monitormode) {
126 if (priv->infra_open || priv->mesh_open) 126 if (priv->infra_open || lbs_mesh_open(priv))
127 return -EBUSY; 127 return -EBUSY;
128 if (priv->mode == IW_MODE_INFRA) 128 if (priv->mode == IW_MODE_INFRA)
129 lbs_cmd_80211_deauthenticate(priv, 129 lbs_cmd_80211_deauthenticate(priv,
@@ -622,7 +622,7 @@ static int lbs_thread(void *data)
622 if (priv->connect_status == LBS_CONNECTED) 622 if (priv->connect_status == LBS_CONNECTED)
623 netif_wake_queue(priv->dev); 623 netif_wake_queue(priv->dev);
624 if (priv->mesh_dev && 624 if (priv->mesh_dev &&
625 priv->mesh_connect_status == LBS_CONNECTED) 625 lbs_mesh_connected(priv))
626 netif_wake_queue(priv->mesh_dev); 626 netif_wake_queue(priv->mesh_dev);
627 } 627 }
628 } 628 }
@@ -809,18 +809,6 @@ int lbs_exit_auto_deep_sleep(struct lbs_private *priv)
809 return 0; 809 return 0;
810} 810}
811 811
812static void lbs_sync_channel_worker(struct work_struct *work)
813{
814 struct lbs_private *priv = container_of(work, struct lbs_private,
815 sync_channel);
816
817 lbs_deb_enter(LBS_DEB_MAIN);
818 if (lbs_update_channel(priv))
819 lbs_pr_info("Channel synchronization failed.");
820 lbs_deb_leave(LBS_DEB_MAIN);
821}
822
823
824static int lbs_init_adapter(struct lbs_private *priv) 812static int lbs_init_adapter(struct lbs_private *priv)
825{ 813{
826 size_t bufsize; 814 size_t bufsize;
@@ -848,14 +836,12 @@ static int lbs_init_adapter(struct lbs_private *priv)
848 memset(priv->current_addr, 0xff, ETH_ALEN); 836 memset(priv->current_addr, 0xff, ETH_ALEN);
849 837
850 priv->connect_status = LBS_DISCONNECTED; 838 priv->connect_status = LBS_DISCONNECTED;
851 priv->mesh_connect_status = LBS_DISCONNECTED;
852 priv->secinfo.auth_mode = IW_AUTH_ALG_OPEN_SYSTEM; 839 priv->secinfo.auth_mode = IW_AUTH_ALG_OPEN_SYSTEM;
853 priv->mode = IW_MODE_INFRA; 840 priv->mode = IW_MODE_INFRA;
854 priv->channel = DEFAULT_AD_HOC_CHANNEL; 841 priv->channel = DEFAULT_AD_HOC_CHANNEL;
855 priv->mac_control = CMD_ACT_MAC_RX_ON | CMD_ACT_MAC_TX_ON; 842 priv->mac_control = CMD_ACT_MAC_RX_ON | CMD_ACT_MAC_TX_ON;
856 priv->radio_on = 1; 843 priv->radio_on = 1;
857 priv->enablehwauto = 1; 844 priv->enablehwauto = 1;
858 priv->capability = WLAN_CAPABILITY_SHORT_PREAMBLE;
859 priv->psmode = LBS802_11POWERMODECAM; 845 priv->psmode = LBS802_11POWERMODECAM;
860 priv->psstate = PS_STATE_FULL_POWER; 846 priv->psstate = PS_STATE_FULL_POWER;
861 priv->is_deep_sleep = 0; 847 priv->is_deep_sleep = 0;
@@ -998,11 +984,6 @@ struct lbs_private *lbs_add_card(void *card, struct device *dmdev)
998 INIT_DELAYED_WORK(&priv->assoc_work, lbs_association_worker); 984 INIT_DELAYED_WORK(&priv->assoc_work, lbs_association_worker);
999 INIT_DELAYED_WORK(&priv->scan_work, lbs_scan_worker); 985 INIT_DELAYED_WORK(&priv->scan_work, lbs_scan_worker);
1000 INIT_WORK(&priv->mcast_work, lbs_set_mcast_worker); 986 INIT_WORK(&priv->mcast_work, lbs_set_mcast_worker);
1001 INIT_WORK(&priv->sync_channel, lbs_sync_channel_worker);
1002
1003 priv->mesh_open = 0;
1004 sprintf(priv->mesh_ssid, "mesh");
1005 priv->mesh_ssid_len = 4;
1006 987
1007 priv->wol_criteria = 0xffffffff; 988 priv->wol_criteria = 0xffffffff;
1008 priv->wol_gpio = 0xff; 989 priv->wol_gpio = 0xff;
@@ -1076,6 +1057,17 @@ void lbs_remove_card(struct lbs_private *priv)
1076EXPORT_SYMBOL_GPL(lbs_remove_card); 1057EXPORT_SYMBOL_GPL(lbs_remove_card);
1077 1058
1078 1059
1060static int lbs_rtap_supported(struct lbs_private *priv)
1061{
1062 if (MRVL_FW_MAJOR_REV(priv->fwrelease) == MRVL_FW_V5)
1063 return 1;
1064
1065 /* newer firmware use a capability mask */
1066 return ((MRVL_FW_MAJOR_REV(priv->fwrelease) >= MRVL_FW_V10) &&
1067 (priv->fwcapinfo & MESH_CAPINFO_ENABLE_MASK));
1068}
1069
1070
1079int lbs_start_card(struct lbs_private *priv) 1071int lbs_start_card(struct lbs_private *priv)
1080{ 1072{
1081 struct net_device *dev = priv->dev; 1073 struct net_device *dev = priv->dev;
@@ -1095,12 +1087,14 @@ int lbs_start_card(struct lbs_private *priv)
1095 1087
1096 lbs_update_channel(priv); 1088 lbs_update_channel(priv);
1097 1089
1090 lbs_init_mesh(priv);
1091
1098 /* 1092 /*
1099 * While rtap isn't related to mesh, only mesh-enabled 1093 * While rtap isn't related to mesh, only mesh-enabled
1100 * firmware implements the rtap functionality via 1094 * firmware implements the rtap functionality via
1101 * CMD_802_11_MONITOR_MODE. 1095 * CMD_802_11_MONITOR_MODE.
1102 */ 1096 */
1103 if (lbs_init_mesh(priv)) { 1097 if (lbs_rtap_supported(priv)) {
1104 if (device_create_file(&dev->dev, &dev_attr_lbs_rtap)) 1098 if (device_create_file(&dev->dev, &dev_attr_lbs_rtap))
1105 lbs_pr_err("cannot register lbs_rtap attribute\n"); 1099 lbs_pr_err("cannot register lbs_rtap attribute\n");
1106 } 1100 }
@@ -1134,7 +1128,9 @@ void lbs_stop_card(struct lbs_private *priv)
1134 netif_carrier_off(dev); 1128 netif_carrier_off(dev);
1135 1129
1136 lbs_debugfs_remove_one(priv); 1130 lbs_debugfs_remove_one(priv);
1137 if (lbs_deinit_mesh(priv)) 1131 lbs_deinit_mesh(priv);
1132
1133 if (lbs_rtap_supported(priv))
1138 device_remove_file(&dev->dev, &dev_attr_lbs_rtap); 1134 device_remove_file(&dev->dev, &dev_attr_lbs_rtap);
1139 1135
1140 /* Delete the timeout of the currently processing command */ 1136 /* Delete the timeout of the currently processing command */
diff --git a/drivers/net/wireless/libertas/mesh.c b/drivers/net/wireless/libertas/mesh.c
index 92b7a357a5e..e385af1f458 100644
--- a/drivers/net/wireless/libertas/mesh.c
+++ b/drivers/net/wireless/libertas/mesh.c
@@ -1,4 +1,3 @@
1#include <linux/moduleparam.h>
2#include <linux/delay.h> 1#include <linux/delay.h>
3#include <linux/etherdevice.h> 2#include <linux/etherdevice.h>
4#include <linux/netdevice.h> 3#include <linux/netdevice.h>
@@ -197,7 +196,14 @@ int lbs_init_mesh(struct lbs_private *priv)
197 196
198 lbs_deb_enter(LBS_DEB_MESH); 197 lbs_deb_enter(LBS_DEB_MESH);
199 198
200 if (priv->mesh_fw_ver == MESH_FW_OLD) { 199 priv->mesh_connect_status = LBS_DISCONNECTED;
200
201 /* Determine mesh_fw_ver from fwrelease and fwcapinfo */
202 /* 5.0.16p0 9.0.0.p0 is known to NOT support any mesh */
203 /* 5.110.22 have mesh command with 0xa3 command id */
204 /* 10.0.0.p0 FW brings in mesh config command with different id */
205 /* Check FW version MSB and initialize mesh_fw_ver */
206 if (MRVL_FW_MAJOR_REV(priv->fwrelease) == MRVL_FW_V5) {
201 /* Enable mesh, if supported, and work out which TLV it uses. 207 /* Enable mesh, if supported, and work out which TLV it uses.
202 0x100 + 291 is an unofficial value used in 5.110.20.pXX 208 0x100 + 291 is an unofficial value used in 5.110.20.pXX
203 0x100 + 37 is the official value used in 5.110.21.pXX 209 0x100 + 37 is the official value used in 5.110.21.pXX
@@ -219,7 +225,9 @@ int lbs_init_mesh(struct lbs_private *priv)
219 priv->channel)) 225 priv->channel))
220 priv->mesh_tlv = 0; 226 priv->mesh_tlv = 0;
221 } 227 }
222 } else if (priv->mesh_fw_ver == MESH_FW_NEW) { 228 } else
229 if ((MRVL_FW_MAJOR_REV(priv->fwrelease) >= MRVL_FW_V10) &&
230 (priv->fwcapinfo & MESH_CAPINFO_ENABLE_MASK)) {
223 /* 10.0.0.pXX new firmwares should succeed with TLV 231 /* 10.0.0.pXX new firmwares should succeed with TLV
224 * 0x100+37; Do not invoke command with old TLV. 232 * 0x100+37; Do not invoke command with old TLV.
225 */ 233 */
@@ -228,7 +236,12 @@ int lbs_init_mesh(struct lbs_private *priv)
228 priv->channel)) 236 priv->channel))
229 priv->mesh_tlv = 0; 237 priv->mesh_tlv = 0;
230 } 238 }
239
240
231 if (priv->mesh_tlv) { 241 if (priv->mesh_tlv) {
242 sprintf(priv->mesh_ssid, "mesh");
243 priv->mesh_ssid_len = 4;
244
232 lbs_add_mesh(priv); 245 lbs_add_mesh(priv);
233 246
234 if (device_create_file(&dev->dev, &dev_attr_lbs_mesh)) 247 if (device_create_file(&dev->dev, &dev_attr_lbs_mesh))
@@ -416,10 +429,10 @@ struct net_device *lbs_mesh_set_dev(struct lbs_private *priv,
416 struct net_device *dev, struct rxpd *rxpd) 429 struct net_device *dev, struct rxpd *rxpd)
417{ 430{
418 if (priv->mesh_dev) { 431 if (priv->mesh_dev) {
419 if (priv->mesh_fw_ver == MESH_FW_OLD) { 432 if (priv->mesh_tlv == TLV_TYPE_OLD_MESH_ID) {
420 if (rxpd->rx_control & RxPD_MESH_FRAME) 433 if (rxpd->rx_control & RxPD_MESH_FRAME)
421 dev = priv->mesh_dev; 434 dev = priv->mesh_dev;
422 } else if (priv->mesh_fw_ver == MESH_FW_NEW) { 435 } else if (priv->mesh_tlv == TLV_TYPE_MESH_ID) {
423 if (rxpd->u.bss.bss_num == MESH_IFACE_ID) 436 if (rxpd->u.bss.bss_num == MESH_IFACE_ID)
424 dev = priv->mesh_dev; 437 dev = priv->mesh_dev;
425 } 438 }
@@ -432,9 +445,9 @@ void lbs_mesh_set_txpd(struct lbs_private *priv,
432 struct net_device *dev, struct txpd *txpd) 445 struct net_device *dev, struct txpd *txpd)
433{ 446{
434 if (dev == priv->mesh_dev) { 447 if (dev == priv->mesh_dev) {
435 if (priv->mesh_fw_ver == MESH_FW_OLD) 448 if (priv->mesh_tlv == TLV_TYPE_OLD_MESH_ID)
436 txpd->tx_control |= cpu_to_le32(TxPD_MESH_FRAME); 449 txpd->tx_control |= cpu_to_le32(TxPD_MESH_FRAME);
437 else if (priv->mesh_fw_ver == MESH_FW_NEW) 450 else if (priv->mesh_tlv == TLV_TYPE_MESH_ID)
438 txpd->u.bss.bss_num = MESH_IFACE_ID; 451 txpd->u.bss.bss_num = MESH_IFACE_ID;
439 } 452 }
440} 453}
@@ -538,7 +551,7 @@ static int __lbs_mesh_config_send(struct lbs_private *priv,
538 * Command id is 0xac for v10 FW along with mesh interface 551 * Command id is 0xac for v10 FW along with mesh interface
539 * id in bits 14-13-12. 552 * id in bits 14-13-12.
540 */ 553 */
541 if (priv->mesh_fw_ver == MESH_FW_NEW) 554 if (priv->mesh_tlv == TLV_TYPE_MESH_ID)
542 command = CMD_MESH_CONFIG | 555 command = CMD_MESH_CONFIG |
543 (MESH_IFACE_ID << MESH_IFACE_BIT_OFFSET); 556 (MESH_IFACE_ID << MESH_IFACE_BIT_OFFSET);
544 557
diff --git a/drivers/net/wireless/libertas/mesh.h b/drivers/net/wireless/libertas/mesh.h
index fea9b5d005f..e2573303a32 100644
--- a/drivers/net/wireless/libertas/mesh.h
+++ b/drivers/net/wireless/libertas/mesh.h
@@ -9,6 +9,8 @@
9#include <net/lib80211.h> 9#include <net/lib80211.h>
10 10
11 11
12#ifdef CONFIG_LIBERTAS_MESH
13
12/* Mesh statistics */ 14/* Mesh statistics */
13struct lbs_mesh_stats { 15struct lbs_mesh_stats {
14 u32 fwd_bcast_cnt; /* Fwd: Broadcast counter */ 16 u32 fwd_bcast_cnt; /* Fwd: Broadcast counter */
@@ -46,11 +48,20 @@ void lbs_mesh_set_txpd(struct lbs_private *priv,
46/* Command handling */ 48/* Command handling */
47 49
48struct cmd_ds_command; 50struct cmd_ds_command;
51struct cmd_ds_mesh_access;
52struct cmd_ds_mesh_config;
49 53
50int lbs_cmd_bt_access(struct cmd_ds_command *cmd, 54int lbs_cmd_bt_access(struct cmd_ds_command *cmd,
51 u16 cmd_action, void *pdata_buf); 55 u16 cmd_action, void *pdata_buf);
52int lbs_cmd_fwt_access(struct cmd_ds_command *cmd, 56int lbs_cmd_fwt_access(struct cmd_ds_command *cmd,
53 u16 cmd_action, void *pdata_buf); 57 u16 cmd_action, void *pdata_buf);
58int lbs_mesh_access(struct lbs_private *priv, uint16_t cmd_action,
59 struct cmd_ds_mesh_access *cmd);
60int lbs_mesh_config_send(struct lbs_private *priv,
61 struct cmd_ds_mesh_config *cmd,
62 uint16_t action, uint16_t type);
63int lbs_mesh_config(struct lbs_private *priv, uint16_t enable, uint16_t chan);
64
54 65
55 66
56/* Persistent configuration */ 67/* Persistent configuration */
@@ -75,4 +86,25 @@ void lbs_mesh_ethtool_get_strings(struct net_device *dev,
75 uint32_t stringset, uint8_t *s); 86 uint32_t stringset, uint8_t *s);
76 87
77 88
89/* Accessors */
90
91#define lbs_mesh_open(priv) (priv->mesh_open)
92#define lbs_mesh_connected(priv) (priv->mesh_connect_status == LBS_CONNECTED)
93
94#else
95
96#define lbs_init_mesh(priv)
97#define lbs_deinit_mesh(priv)
98#define lbs_add_mesh(priv)
99#define lbs_remove_mesh(priv)
100#define lbs_mesh_set_dev(priv, dev, rxpd) (dev)
101#define lbs_mesh_set_txpd(priv, dev, txpd)
102#define lbs_mesh_config(priv, enable, chan)
103#define lbs_mesh_open(priv) (0)
104#define lbs_mesh_connected(priv) (0)
105
106#endif
107
108
109
78#endif 110#endif
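When CONFIG_LIBERTAS_MESH is off, the header above stubs the mesh API out with empty macros so callers in main.c, cmd.c, scan.c and wext.c need no #ifdefs of their own. An equivalent formulation, sketched here only as an alternative (not what the patch does, and with return types assumed to match the real prototypes), uses static inlines so arguments remain type-checked in the mesh-less build:

#ifndef CONFIG_LIBERTAS_MESH
static inline int lbs_init_mesh(struct lbs_private *priv) { return 0; }
static inline void lbs_deinit_mesh(struct lbs_private *priv) { }
static inline void lbs_add_mesh(struct lbs_private *priv) { }
static inline void lbs_remove_mesh(struct lbs_private *priv) { }
static inline int lbs_mesh_open(struct lbs_private *priv) { return 0; }
static inline int lbs_mesh_connected(struct lbs_private *priv) { return 0; }
#endif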
diff --git a/drivers/net/wireless/libertas/scan.c b/drivers/net/wireless/libertas/scan.c
index b0b1c784150..220361e69cd 100644
--- a/drivers/net/wireless/libertas/scan.c
+++ b/drivers/net/wireless/libertas/scan.c
@@ -635,7 +635,7 @@ out:
635 if (priv->connect_status == LBS_CONNECTED && !priv->tx_pending_len) 635 if (priv->connect_status == LBS_CONNECTED && !priv->tx_pending_len)
636 netif_wake_queue(priv->dev); 636 netif_wake_queue(priv->dev);
637 637
638 if (priv->mesh_dev && (priv->mesh_connect_status == LBS_CONNECTED) && 638 if (priv->mesh_dev && lbs_mesh_connected(priv) &&
639 !priv->tx_pending_len) 639 !priv->tx_pending_len)
640 netif_wake_queue(priv->mesh_dev); 640 netif_wake_queue(priv->mesh_dev);
641 641
diff --git a/drivers/net/wireless/libertas/tx.c b/drivers/net/wireless/libertas/tx.c
index 315d1ce286c..52d244ea3d9 100644
--- a/drivers/net/wireless/libertas/tx.c
+++ b/drivers/net/wireless/libertas/tx.c
@@ -198,7 +198,7 @@ void lbs_send_tx_feedback(struct lbs_private *priv, u32 try_count)
198 if (priv->connect_status == LBS_CONNECTED) 198 if (priv->connect_status == LBS_CONNECTED)
199 netif_wake_queue(priv->dev); 199 netif_wake_queue(priv->dev);
200 200
201 if (priv->mesh_dev && (priv->mesh_connect_status == LBS_CONNECTED)) 201 if (priv->mesh_dev && lbs_mesh_connected(priv))
202 netif_wake_queue(priv->mesh_dev); 202 netif_wake_queue(priv->mesh_dev);
203} 203}
204EXPORT_SYMBOL_GPL(lbs_send_tx_feedback); 204EXPORT_SYMBOL_GPL(lbs_send_tx_feedback);
diff --git a/drivers/net/wireless/libertas/wext.c b/drivers/net/wireless/libertas/wext.c
index 4b1aab593a8..71f88a08e09 100644
--- a/drivers/net/wireless/libertas/wext.c
+++ b/drivers/net/wireless/libertas/wext.c
@@ -192,7 +192,7 @@ static void copy_active_data_rates(struct lbs_private *priv, u8 *rates)
192 lbs_deb_enter(LBS_DEB_WEXT); 192 lbs_deb_enter(LBS_DEB_WEXT);
193 193
194 if ((priv->connect_status != LBS_CONNECTED) && 194 if ((priv->connect_status != LBS_CONNECTED) &&
195 (priv->mesh_connect_status != LBS_CONNECTED)) 195 !lbs_mesh_connected(priv))
196 memcpy(rates, lbs_bg_rates, MAX_RATES); 196 memcpy(rates, lbs_bg_rates, MAX_RATES);
197 else 197 else
198 memcpy(rates, priv->curbssparams.rates, MAX_RATES); 198 memcpy(rates, priv->curbssparams.rates, MAX_RATES);
@@ -298,6 +298,7 @@ static int lbs_get_nick(struct net_device *dev, struct iw_request_info *info,
298 return 0; 298 return 0;
299} 299}
300 300
301#ifdef CONFIG_LIBERTAS_MESH
301static int mesh_get_nick(struct net_device *dev, struct iw_request_info *info, 302static int mesh_get_nick(struct net_device *dev, struct iw_request_info *info,
302 struct iw_point *dwrq, char *extra) 303 struct iw_point *dwrq, char *extra)
303{ 304{
@@ -307,7 +308,7 @@ static int mesh_get_nick(struct net_device *dev, struct iw_request_info *info,
307 308
308 /* Use nickname to indicate that mesh is on */ 309 /* Use nickname to indicate that mesh is on */
309 310
310 if (priv->mesh_connect_status == LBS_CONNECTED) { 311 if (lbs_mesh_connected(priv)) {
311 strncpy(extra, "Mesh", 12); 312 strncpy(extra, "Mesh", 12);
312 extra[12] = '\0'; 313 extra[12] = '\0';
313 dwrq->length = strlen(extra); 314 dwrq->length = strlen(extra);
@@ -321,6 +322,7 @@ static int mesh_get_nick(struct net_device *dev, struct iw_request_info *info,
321 lbs_deb_leave(LBS_DEB_WEXT); 322 lbs_deb_leave(LBS_DEB_WEXT);
322 return 0; 323 return 0;
323} 324}
325#endif
324 326
325static int lbs_set_rts(struct net_device *dev, struct iw_request_info *info, 327static int lbs_set_rts(struct net_device *dev, struct iw_request_info *info,
326 struct iw_param *vwrq, char *extra) 328 struct iw_param *vwrq, char *extra)
@@ -422,6 +424,7 @@ static int lbs_get_mode(struct net_device *dev,
422 return 0; 424 return 0;
423} 425}
424 426
427#ifdef CONFIG_LIBERTAS_MESH
425static int mesh_wlan_get_mode(struct net_device *dev, 428static int mesh_wlan_get_mode(struct net_device *dev,
426 struct iw_request_info *info, u32 * uwrq, 429 struct iw_request_info *info, u32 * uwrq,
427 char *extra) 430 char *extra)
@@ -433,6 +436,7 @@ static int mesh_wlan_get_mode(struct net_device *dev,
433 lbs_deb_leave(LBS_DEB_WEXT); 436 lbs_deb_leave(LBS_DEB_WEXT);
434 return 0; 437 return 0;
435} 438}
439#endif
436 440
437static int lbs_get_txpow(struct net_device *dev, 441static int lbs_get_txpow(struct net_device *dev,
438 struct iw_request_info *info, 442 struct iw_request_info *info,
@@ -863,7 +867,7 @@ static struct iw_statistics *lbs_get_wireless_stats(struct net_device *dev)
863 867
864 /* If we're not associated, all quality values are meaningless */ 868 /* If we're not associated, all quality values are meaningless */
865 if ((priv->connect_status != LBS_CONNECTED) && 869 if ((priv->connect_status != LBS_CONNECTED) &&
866 (priv->mesh_connect_status != LBS_CONNECTED)) 870 !lbs_mesh_connected(priv))
867 goto out; 871 goto out;
868 872
869 /* Quality by RSSI */ 873 /* Quality by RSSI */
@@ -1010,6 +1014,7 @@ out:
1010 return ret; 1014 return ret;
1011} 1015}
1012 1016
1017#ifdef CONFIG_LIBERTAS_MESH
1013static int lbs_mesh_set_freq(struct net_device *dev, 1018static int lbs_mesh_set_freq(struct net_device *dev,
1014 struct iw_request_info *info, 1019 struct iw_request_info *info,
1015 struct iw_freq *fwrq, char *extra) 1020 struct iw_freq *fwrq, char *extra)
@@ -1061,6 +1066,7 @@ out:
1061 lbs_deb_leave_args(LBS_DEB_WEXT, "ret %d", ret); 1066 lbs_deb_leave_args(LBS_DEB_WEXT, "ret %d", ret);
1062 return ret; 1067 return ret;
1063} 1068}
1069#endif
1064 1070
1065static int lbs_set_rate(struct net_device *dev, struct iw_request_info *info, 1071static int lbs_set_rate(struct net_device *dev, struct iw_request_info *info,
1066 struct iw_param *vwrq, char *extra) 1072 struct iw_param *vwrq, char *extra)
@@ -2108,6 +2114,7 @@ out:
2108 return ret; 2114 return ret;
2109} 2115}
2110 2116
2117#ifdef CONFIG_LIBERTAS_MESH
2111static int lbs_mesh_get_essid(struct net_device *dev, 2118static int lbs_mesh_get_essid(struct net_device *dev,
2112 struct iw_request_info *info, 2119 struct iw_request_info *info,
2113 struct iw_point *dwrq, char *extra) 2120 struct iw_point *dwrq, char *extra)
@@ -2161,6 +2168,7 @@ static int lbs_mesh_set_essid(struct net_device *dev,
2161 lbs_deb_leave_args(LBS_DEB_WEXT, "ret %d", ret); 2168 lbs_deb_leave_args(LBS_DEB_WEXT, "ret %d", ret);
2162 return ret; 2169 return ret;
2163} 2170}
2171#endif
2164 2172
2165/** 2173/**
2166 * @brief Connect to the AP or Ad-hoc Network with specific bssid 2174 * @brief Connect to the AP or Ad-hoc Network with specific bssid
@@ -2267,7 +2275,13 @@ static const iw_handler lbs_handler[] = {
2267 (iw_handler) lbs_get_encodeext,/* SIOCGIWENCODEEXT */ 2275 (iw_handler) lbs_get_encodeext,/* SIOCGIWENCODEEXT */
2268 (iw_handler) NULL, /* SIOCSIWPMKSA */ 2276 (iw_handler) NULL, /* SIOCSIWPMKSA */
2269}; 2277};
2278struct iw_handler_def lbs_handler_def = {
2279 .num_standard = ARRAY_SIZE(lbs_handler),
2280 .standard = (iw_handler *) lbs_handler,
2281 .get_wireless_stats = lbs_get_wireless_stats,
2282};
2270 2283
2284#ifdef CONFIG_LIBERTAS_MESH
2271static const iw_handler mesh_wlan_handler[] = { 2285static const iw_handler mesh_wlan_handler[] = {
2272 (iw_handler) NULL, /* SIOCSIWCOMMIT */ 2286 (iw_handler) NULL, /* SIOCSIWCOMMIT */
2273 (iw_handler) lbs_get_name, /* SIOCGIWNAME */ 2287 (iw_handler) lbs_get_name, /* SIOCGIWNAME */
@@ -2325,14 +2339,10 @@ static const iw_handler mesh_wlan_handler[] = {
2325 (iw_handler) lbs_get_encodeext,/* SIOCGIWENCODEEXT */ 2339 (iw_handler) lbs_get_encodeext,/* SIOCGIWENCODEEXT */
2326 (iw_handler) NULL, /* SIOCSIWPMKSA */ 2340 (iw_handler) NULL, /* SIOCSIWPMKSA */
2327}; 2341};
2328struct iw_handler_def lbs_handler_def = {
2329 .num_standard = ARRAY_SIZE(lbs_handler),
2330 .standard = (iw_handler *) lbs_handler,
2331 .get_wireless_stats = lbs_get_wireless_stats,
2332};
2333 2342
2334struct iw_handler_def mesh_handler_def = { 2343struct iw_handler_def mesh_handler_def = {
2335 .num_standard = ARRAY_SIZE(mesh_wlan_handler), 2344 .num_standard = ARRAY_SIZE(mesh_wlan_handler),
2336 .standard = (iw_handler *) mesh_wlan_handler, 2345 .standard = (iw_handler *) mesh_wlan_handler,
2337 .get_wireless_stats = lbs_get_wireless_stats, 2346 .get_wireless_stats = lbs_get_wireless_stats,
2338}; 2347};
2348#endif
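
With the #ifdef CONFIG_LIBERTAS_MESH guards above, mesh_handler_def only exists in mesh-enabled builds, so whatever attaches it to the mesh net_device has to sit behind the same option. A hypothetical sketch of that hookup (the real attachment lives in the libertas mesh code, not in this diff; the function name is invented):

#ifdef CONFIG_LIBERTAS_MESH
static void example_attach_mesh_wext(struct net_device *mesh_dev)
{
	/* Illustration only: point the mesh netdev at the mesh-specific
	 * wireless-extensions handler table defined above. */
	mesh_dev->wireless_handlers = &mesh_handler_def;
}
#endif
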
diff --git a/drivers/net/wireless/libertas_tf/main.c b/drivers/net/wireless/libertas_tf/main.c
index 26a1abd5bb0..ba3eb0101d5 100644
--- a/drivers/net/wireless/libertas_tf/main.c
+++ b/drivers/net/wireless/libertas_tf/main.c
@@ -318,14 +318,14 @@ static void lbtf_op_stop(struct ieee80211_hw *hw)
318} 318}
319 319
320static int lbtf_op_add_interface(struct ieee80211_hw *hw, 320static int lbtf_op_add_interface(struct ieee80211_hw *hw,
321 struct ieee80211_if_init_conf *conf) 321 struct ieee80211_vif *vif)
322{ 322{
323 struct lbtf_private *priv = hw->priv; 323 struct lbtf_private *priv = hw->priv;
324 if (priv->vif != NULL) 324 if (priv->vif != NULL)
325 return -EOPNOTSUPP; 325 return -EOPNOTSUPP;
326 326
327 priv->vif = conf->vif; 327 priv->vif = vif;
328 switch (conf->type) { 328 switch (vif->type) {
329 case NL80211_IFTYPE_MESH_POINT: 329 case NL80211_IFTYPE_MESH_POINT:
330 case NL80211_IFTYPE_AP: 330 case NL80211_IFTYPE_AP:
331 lbtf_set_mode(priv, LBTF_AP_MODE); 331 lbtf_set_mode(priv, LBTF_AP_MODE);
@@ -337,12 +337,12 @@ static int lbtf_op_add_interface(struct ieee80211_hw *hw,
337 priv->vif = NULL; 337 priv->vif = NULL;
338 return -EOPNOTSUPP; 338 return -EOPNOTSUPP;
339 } 339 }
340 lbtf_set_mac_address(priv, (u8 *) conf->mac_addr); 340 lbtf_set_mac_address(priv, (u8 *) vif->addr);
341 return 0; 341 return 0;
342} 342}
343 343
344static void lbtf_op_remove_interface(struct ieee80211_hw *hw, 344static void lbtf_op_remove_interface(struct ieee80211_hw *hw,
345 struct ieee80211_if_init_conf *conf) 345 struct ieee80211_vif *vif)
346{ 346{
347 struct lbtf_private *priv = hw->priv; 347 struct lbtf_private *priv = hw->priv;
348 348
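
Both hunks follow the mac80211 interface API change used throughout this series: add_interface()/remove_interface() now receive the struct ieee80211_vif directly instead of a struct ieee80211_if_init_conf wrapper, so conf->vif, conf->type and conf->mac_addr become vif, vif->type and vif->addr. A condensed, stand-alone restatement of that field mapping (the function name is a placeholder, not code from this diff):

static int example_op_add_interface(struct ieee80211_hw *hw,
				    struct ieee80211_vif *vif)
{
	/* was: conf->type     -> now vif->type */
	/* was: conf->mac_addr -> now vif->addr */
	if (vif->type != NL80211_IFTYPE_STATION &&
	    vif->type != NL80211_IFTYPE_MESH_POINT &&
	    vif->type != NL80211_IFTYPE_AP)
		return -EOPNOTSUPP;

	printk(KERN_DEBUG "add_interface: type=%d addr=%pM\n",
	       vif->type, vif->addr);
	return 0;
}
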
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
index 88e41176e7f..84df3fcf37b 100644
--- a/drivers/net/wireless/mac80211_hwsim.c
+++ b/drivers/net/wireless/mac80211_hwsim.c
@@ -436,6 +436,38 @@ static bool hwsim_ps_rx_ok(struct mac80211_hwsim_data *data,
436} 436}
437 437
438 438
439struct mac80211_hwsim_addr_match_data {
440 bool ret;
441 const u8 *addr;
442};
443
444static void mac80211_hwsim_addr_iter(void *data, u8 *mac,
445 struct ieee80211_vif *vif)
446{
447 struct mac80211_hwsim_addr_match_data *md = data;
448 if (memcmp(mac, md->addr, ETH_ALEN) == 0)
449 md->ret = true;
450}
451
452
453static bool mac80211_hwsim_addr_match(struct mac80211_hwsim_data *data,
454 const u8 *addr)
455{
456 struct mac80211_hwsim_addr_match_data md;
457
458 if (memcmp(addr, data->hw->wiphy->perm_addr, ETH_ALEN) == 0)
459 return true;
460
461 md.ret = false;
462 md.addr = addr;
463 ieee80211_iterate_active_interfaces_atomic(data->hw,
464 mac80211_hwsim_addr_iter,
465 &md);
466
467 return md.ret;
468}
469
470
439static bool mac80211_hwsim_tx_frame(struct ieee80211_hw *hw, 471static bool mac80211_hwsim_tx_frame(struct ieee80211_hw *hw,
440 struct sk_buff *skb) 472 struct sk_buff *skb)
441{ 473{
@@ -488,8 +520,7 @@ static bool mac80211_hwsim_tx_frame(struct ieee80211_hw *hw,
488 if (nskb == NULL) 520 if (nskb == NULL)
489 continue; 521 continue;
490 522
491 if (memcmp(hdr->addr1, data2->hw->wiphy->perm_addr, 523 if (mac80211_hwsim_addr_match(data2, hdr->addr1))
492 ETH_ALEN) == 0)
493 ack = true; 524 ack = true;
494 memcpy(IEEE80211_SKB_RXCB(nskb), &rx_status, sizeof(rx_status)); 525 memcpy(IEEE80211_SKB_RXCB(nskb), &rx_status, sizeof(rx_status));
495 ieee80211_rx_irqsafe(data2->hw, nskb); 526 ieee80211_rx_irqsafe(data2->hw, nskb);
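
The new helper above threads a small context structure through the void *data argument of ieee80211_iterate_active_interfaces_atomic() and lets the per-vif callback accumulate the answer. The same pattern works for any per-interface query; a minimal sketch with invented names, assuming the iterator signature used in the hunk above:

struct example_vif_count {
	int n;
};

static void example_count_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
{
	struct example_vif_count *c = data;

	c->n++;		/* one callback per active interface */
}

static int example_count_active_vifs(struct ieee80211_hw *hw)
{
	struct example_vif_count c = { .n = 0 };

	ieee80211_iterate_active_interfaces_atomic(hw, example_count_iter, &c);
	return c.n;
}
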
@@ -553,24 +584,24 @@ static void mac80211_hwsim_stop(struct ieee80211_hw *hw)
553 584
554 585
555static int mac80211_hwsim_add_interface(struct ieee80211_hw *hw, 586static int mac80211_hwsim_add_interface(struct ieee80211_hw *hw,
556 struct ieee80211_if_init_conf *conf) 587 struct ieee80211_vif *vif)
557{ 588{
558 printk(KERN_DEBUG "%s:%s (type=%d mac_addr=%pM)\n", 589 printk(KERN_DEBUG "%s:%s (type=%d mac_addr=%pM)\n",
559 wiphy_name(hw->wiphy), __func__, conf->type, 590 wiphy_name(hw->wiphy), __func__, vif->type,
560 conf->mac_addr); 591 vif->addr);
561 hwsim_set_magic(conf->vif); 592 hwsim_set_magic(vif);
562 return 0; 593 return 0;
563} 594}
564 595
565 596
566static void mac80211_hwsim_remove_interface( 597static void mac80211_hwsim_remove_interface(
567 struct ieee80211_hw *hw, struct ieee80211_if_init_conf *conf) 598 struct ieee80211_hw *hw, struct ieee80211_vif *vif)
568{ 599{
569 printk(KERN_DEBUG "%s:%s (type=%d mac_addr=%pM)\n", 600 printk(KERN_DEBUG "%s:%s (type=%d mac_addr=%pM)\n",
570 wiphy_name(hw->wiphy), __func__, conf->type, 601 wiphy_name(hw->wiphy), __func__, vif->type,
571 conf->mac_addr); 602 vif->addr);
572 hwsim_check_magic(conf->vif); 603 hwsim_check_magic(vif);
573 hwsim_clear_magic(conf->vif); 604 hwsim_clear_magic(vif);
574} 605}
575 606
576 607
@@ -618,12 +649,26 @@ static int mac80211_hwsim_config(struct ieee80211_hw *hw, u32 changed)
618{ 649{
619 struct mac80211_hwsim_data *data = hw->priv; 650 struct mac80211_hwsim_data *data = hw->priv;
620 struct ieee80211_conf *conf = &hw->conf; 651 struct ieee80211_conf *conf = &hw->conf;
621 652 static const char *chantypes[4] = {
622 printk(KERN_DEBUG "%s:%s (freq=%d idle=%d ps=%d)\n", 653 [NL80211_CHAN_NO_HT] = "noht",
654 [NL80211_CHAN_HT20] = "ht20",
655 [NL80211_CHAN_HT40MINUS] = "ht40-",
656 [NL80211_CHAN_HT40PLUS] = "ht40+",
657 };
658 static const char *smps_modes[IEEE80211_SMPS_NUM_MODES] = {
659 [IEEE80211_SMPS_AUTOMATIC] = "auto",
660 [IEEE80211_SMPS_OFF] = "off",
661 [IEEE80211_SMPS_STATIC] = "static",
662 [IEEE80211_SMPS_DYNAMIC] = "dynamic",
663 };
664
665 printk(KERN_DEBUG "%s:%s (freq=%d/%s idle=%d ps=%d smps=%s)\n",
623 wiphy_name(hw->wiphy), __func__, 666 wiphy_name(hw->wiphy), __func__,
624 conf->channel->center_freq, 667 conf->channel->center_freq,
668 chantypes[conf->channel_type],
625 !!(conf->flags & IEEE80211_CONF_IDLE), 669 !!(conf->flags & IEEE80211_CONF_IDLE),
626 !!(conf->flags & IEEE80211_CONF_PS)); 670 !!(conf->flags & IEEE80211_CONF_PS),
671 smps_modes[conf->smps_mode]);
627 672
628 data->idle = !!(conf->flags & IEEE80211_CONF_IDLE); 673 data->idle = !!(conf->flags & IEEE80211_CONF_IDLE);
629 674
@@ -827,6 +872,41 @@ static int mac80211_hwsim_testmode_cmd(struct ieee80211_hw *hw,
827} 872}
828#endif 873#endif
829 874
875static int mac80211_hwsim_ampdu_action(struct ieee80211_hw *hw,
876 struct ieee80211_vif *vif,
877 enum ieee80211_ampdu_mlme_action action,
878 struct ieee80211_sta *sta, u16 tid, u16 *ssn)
879{
880 switch (action) {
881 case IEEE80211_AMPDU_TX_START:
882 ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
883 break;
884 case IEEE80211_AMPDU_TX_STOP:
885 ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
886 break;
887 case IEEE80211_AMPDU_TX_OPERATIONAL:
888 break;
889 case IEEE80211_AMPDU_RX_START:
890 case IEEE80211_AMPDU_RX_STOP:
891 break;
892 default:
893 return -EOPNOTSUPP;
894 }
895
896 return 0;
897}
898
899static void mac80211_hwsim_flush(struct ieee80211_hw *hw, bool drop)
900{
901 /*
902 * In this special case, there's nothing we need to
903 * do because hwsim does transmission synchronously.
904 * In the future, when it does transmissions via
905 * userspace, we may need to do something.
906 */
907}
908
909
830static const struct ieee80211_ops mac80211_hwsim_ops = 910static const struct ieee80211_ops mac80211_hwsim_ops =
831{ 911{
832 .tx = mac80211_hwsim_tx, 912 .tx = mac80211_hwsim_tx,
@@ -841,6 +921,8 @@ static const struct ieee80211_ops mac80211_hwsim_ops =
841 .set_tim = mac80211_hwsim_set_tim, 921 .set_tim = mac80211_hwsim_set_tim,
842 .conf_tx = mac80211_hwsim_conf_tx, 922 .conf_tx = mac80211_hwsim_conf_tx,
843 CFG80211_TESTMODE_CMD(mac80211_hwsim_testmode_cmd) 923 CFG80211_TESTMODE_CMD(mac80211_hwsim_testmode_cmd)
924 .ampdu_action = mac80211_hwsim_ampdu_action,
925 .flush = mac80211_hwsim_flush,
844}; 926};
845 927
846 928
@@ -1082,7 +1164,9 @@ static int __init init_mac80211_hwsim(void)
1082 BIT(NL80211_IFTYPE_MESH_POINT); 1164 BIT(NL80211_IFTYPE_MESH_POINT);
1083 1165
1084 hw->flags = IEEE80211_HW_MFP_CAPABLE | 1166 hw->flags = IEEE80211_HW_MFP_CAPABLE |
1085 IEEE80211_HW_SIGNAL_DBM; 1167 IEEE80211_HW_SIGNAL_DBM |
1168 IEEE80211_HW_SUPPORTS_STATIC_SMPS |
1169 IEEE80211_HW_SUPPORTS_DYNAMIC_SMPS;
1086 1170
1087 /* ask mac80211 to reserve space for magic */ 1171 /* ask mac80211 to reserve space for magic */
1088 hw->vif_data_size = sizeof(struct hwsim_vif_priv); 1172 hw->vif_data_size = sizeof(struct hwsim_vif_priv);
diff --git a/drivers/net/wireless/mwl8k.c b/drivers/net/wireless/mwl8k.c
index 59f92105b0c..68546ca0ba3 100644
--- a/drivers/net/wireless/mwl8k.c
+++ b/drivers/net/wireless/mwl8k.c
@@ -2,7 +2,7 @@
2 * drivers/net/wireless/mwl8k.c 2 * drivers/net/wireless/mwl8k.c
3 * Driver for Marvell TOPDOG 802.11 Wireless cards 3 * Driver for Marvell TOPDOG 802.11 Wireless cards
4 * 4 *
5 * Copyright (C) 2008-2009 Marvell Semiconductor Inc. 5 * Copyright (C) 2008, 2009, 2010 Marvell Semiconductor Inc.
6 * 6 *
7 * This file is licensed under the terms of the GNU General Public 7 * This file is licensed under the terms of the GNU General Public
8 * License version 2. This program is licensed "as is" without any 8 * License version 2. This program is licensed "as is" without any
@@ -26,7 +26,7 @@
26 26
27#define MWL8K_DESC "Marvell TOPDOG(R) 802.11 Wireless Network Driver" 27#define MWL8K_DESC "Marvell TOPDOG(R) 802.11 Wireless Network Driver"
28#define MWL8K_NAME KBUILD_MODNAME 28#define MWL8K_NAME KBUILD_MODNAME
29#define MWL8K_VERSION "0.10" 29#define MWL8K_VERSION "0.12"
30 30
31/* Register definitions */ 31/* Register definitions */
32#define MWL8K_HIU_GEN_PTR 0x00000c10 32#define MWL8K_HIU_GEN_PTR 0x00000c10
@@ -92,8 +92,7 @@ struct mwl8k_device_info {
92 char *part_name; 92 char *part_name;
93 char *helper_image; 93 char *helper_image;
94 char *fw_image; 94 char *fw_image;
95 struct rxd_ops *rxd_ops; 95 struct rxd_ops *ap_rxd_ops;
96 u16 modes;
97}; 96};
98 97
99struct mwl8k_rx_queue { 98struct mwl8k_rx_queue {
@@ -126,28 +125,30 @@ struct mwl8k_tx_queue {
126 struct sk_buff **skb; 125 struct sk_buff **skb;
127}; 126};
128 127
129/* Pointers to the firmware data and meta information about it. */ 128struct mwl8k_priv {
130struct mwl8k_firmware { 129 struct ieee80211_hw *hw;
131 /* Boot helper code */ 130 struct pci_dev *pdev;
132 struct firmware *helper;
133 131
134 /* Microcode */ 132 struct mwl8k_device_info *device_info;
135 struct firmware *ucode;
136};
137 133
138struct mwl8k_priv {
139 void __iomem *sram; 134 void __iomem *sram;
140 void __iomem *regs; 135 void __iomem *regs;
141 struct ieee80211_hw *hw;
142 136
143 struct pci_dev *pdev; 137 /* firmware */
138 struct firmware *fw_helper;
139 struct firmware *fw_ucode;
144 140
145 struct mwl8k_device_info *device_info; 141 /* hardware/firmware parameters */
146 bool ap_fw; 142 bool ap_fw;
147 struct rxd_ops *rxd_ops; 143 struct rxd_ops *rxd_ops;
148 144 struct ieee80211_supported_band band_24;
149 /* firmware files and meta data */ 145 struct ieee80211_channel channels_24[14];
150 struct mwl8k_firmware fw; 146 struct ieee80211_rate rates_24[14];
147 struct ieee80211_supported_band band_50;
148 struct ieee80211_channel channels_50[4];
149 struct ieee80211_rate rates_50[9];
150 u32 ap_macids_supported;
151 u32 sta_macids_supported;
151 152
152 /* firmware access */ 153 /* firmware access */
153 struct mutex fw_mutex; 154 struct mutex fw_mutex;
@@ -161,9 +162,9 @@ struct mwl8k_priv {
161 /* TX quiesce completion, protected by fw_mutex and tx_lock */ 162 /* TX quiesce completion, protected by fw_mutex and tx_lock */
162 struct completion *tx_wait; 163 struct completion *tx_wait;
163 164
164 struct ieee80211_vif *vif; 165 /* List of interfaces. */
165 166 u32 macids_used;
166 struct ieee80211_channel *current_channel; 167 struct list_head vif_list;
167 168
168 /* power management status cookie from firmware */ 169 /* power management status cookie from firmware */
169 u32 *cookie; 170 u32 *cookie;
@@ -182,16 +183,15 @@ struct mwl8k_priv {
182 struct mwl8k_rx_queue rxq[MWL8K_RX_QUEUES]; 183 struct mwl8k_rx_queue rxq[MWL8K_RX_QUEUES];
183 struct mwl8k_tx_queue txq[MWL8K_TX_QUEUES]; 184 struct mwl8k_tx_queue txq[MWL8K_TX_QUEUES];
184 185
185 /* PHY parameters */
186 struct ieee80211_supported_band band;
187 struct ieee80211_channel channels[14];
188 struct ieee80211_rate rates[14];
189
190 bool radio_on; 186 bool radio_on;
191 bool radio_short_preamble; 187 bool radio_short_preamble;
192 bool sniffer_enabled; 188 bool sniffer_enabled;
193 bool wmm_enabled; 189 bool wmm_enabled;
194 190
191 struct work_struct sta_notify_worker;
192 spinlock_t sta_notify_list_lock;
193 struct list_head sta_notify_list;
194
195 /* XXX need to convert this to handle multiple interfaces */ 195 /* XXX need to convert this to handle multiple interfaces */
196 bool capture_beacon; 196 bool capture_beacon;
197 u8 capture_bssid[ETH_ALEN]; 197 u8 capture_bssid[ETH_ALEN];
@@ -205,32 +205,33 @@ struct mwl8k_priv {
205 */ 205 */
206 struct work_struct finalize_join_worker; 206 struct work_struct finalize_join_worker;
207 207
208 /* Tasklet to reclaim TX descriptors and buffers after tx */ 208 /* Tasklet to perform TX reclaim. */
209 struct tasklet_struct tx_reclaim_task; 209 struct tasklet_struct poll_tx_task;
210
211 /* Tasklet to perform RX. */
212 struct tasklet_struct poll_rx_task;
210}; 213};
211 214
212/* Per interface specific private data */ 215/* Per interface specific private data */
213struct mwl8k_vif { 216struct mwl8k_vif {
214 /* backpointer to parent config block */ 217 struct list_head list;
215 struct mwl8k_priv *priv; 218 struct ieee80211_vif *vif;
216
217 /* BSS config of AP or IBSS from mac80211*/
218 struct ieee80211_bss_conf bss_info;
219
220 /* BSSID of AP or IBSS */
221 u8 bssid[ETH_ALEN];
222 u8 mac_addr[ETH_ALEN];
223 219
224 /* Index into station database.Returned by update_sta_db call */ 220 /* Firmware macid for this vif. */
225 u8 peer_id; 221 int macid;
226 222
227 /* Non AMPDU sequence number assigned by driver */ 223 /* Non AMPDU sequence number assigned by driver. */
228 u16 seqno; 224 u16 seqno;
229}; 225};
230
231#define MWL8K_VIF(_vif) ((struct mwl8k_vif *)&((_vif)->drv_priv)) 226#define MWL8K_VIF(_vif) ((struct mwl8k_vif *)&((_vif)->drv_priv))
232 227
233static const struct ieee80211_channel mwl8k_channels[] = { 228struct mwl8k_sta {
229 /* Index into station database. Returned by UPDATE_STADB. */
230 u8 peer_id;
231};
232#define MWL8K_STA(_sta) ((struct mwl8k_sta *)&((_sta)->drv_priv))
233
234static const struct ieee80211_channel mwl8k_channels_24[] = {
234 { .center_freq = 2412, .hw_value = 1, }, 235 { .center_freq = 2412, .hw_value = 1, },
235 { .center_freq = 2417, .hw_value = 2, }, 236 { .center_freq = 2417, .hw_value = 2, },
236 { .center_freq = 2422, .hw_value = 3, }, 237 { .center_freq = 2422, .hw_value = 3, },
@@ -242,9 +243,12 @@ static const struct ieee80211_channel mwl8k_channels[] = {
242 { .center_freq = 2452, .hw_value = 9, }, 243 { .center_freq = 2452, .hw_value = 9, },
243 { .center_freq = 2457, .hw_value = 10, }, 244 { .center_freq = 2457, .hw_value = 10, },
244 { .center_freq = 2462, .hw_value = 11, }, 245 { .center_freq = 2462, .hw_value = 11, },
246 { .center_freq = 2467, .hw_value = 12, },
247 { .center_freq = 2472, .hw_value = 13, },
248 { .center_freq = 2484, .hw_value = 14, },
245}; 249};
246 250
247static const struct ieee80211_rate mwl8k_rates[] = { 251static const struct ieee80211_rate mwl8k_rates_24[] = {
248 { .bitrate = 10, .hw_value = 2, }, 252 { .bitrate = 10, .hw_value = 2, },
249 { .bitrate = 20, .hw_value = 4, }, 253 { .bitrate = 20, .hw_value = 4, },
250 { .bitrate = 55, .hw_value = 11, }, 254 { .bitrate = 55, .hw_value = 11, },
@@ -261,8 +265,23 @@ static const struct ieee80211_rate mwl8k_rates[] = {
261 { .bitrate = 720, .hw_value = 144, }, 265 { .bitrate = 720, .hw_value = 144, },
262}; 266};
263 267
264static const u8 mwl8k_rateids[12] = { 268static const struct ieee80211_channel mwl8k_channels_50[] = {
265 2, 4, 11, 22, 12, 18, 24, 36, 48, 72, 96, 108, 269 { .center_freq = 5180, .hw_value = 36, },
270 { .center_freq = 5200, .hw_value = 40, },
271 { .center_freq = 5220, .hw_value = 44, },
272 { .center_freq = 5240, .hw_value = 48, },
273};
274
275static const struct ieee80211_rate mwl8k_rates_50[] = {
276 { .bitrate = 60, .hw_value = 12, },
277 { .bitrate = 90, .hw_value = 18, },
278 { .bitrate = 120, .hw_value = 24, },
279 { .bitrate = 180, .hw_value = 36, },
280 { .bitrate = 240, .hw_value = 48, },
281 { .bitrate = 360, .hw_value = 72, },
282 { .bitrate = 480, .hw_value = 96, },
283 { .bitrate = 540, .hw_value = 108, },
284 { .bitrate = 720, .hw_value = 144, },
266}; 285};
267 286
268/* Set or get info from Firmware */ 287/* Set or get info from Firmware */
@@ -278,6 +297,7 @@ static const u8 mwl8k_rateids[12] = {
278#define MWL8K_CMD_RADIO_CONTROL 0x001c 297#define MWL8K_CMD_RADIO_CONTROL 0x001c
279#define MWL8K_CMD_RF_TX_POWER 0x001e 298#define MWL8K_CMD_RF_TX_POWER 0x001e
280#define MWL8K_CMD_RF_ANTENNA 0x0020 299#define MWL8K_CMD_RF_ANTENNA 0x0020
300#define MWL8K_CMD_SET_BEACON 0x0100 /* per-vif */
281#define MWL8K_CMD_SET_PRE_SCAN 0x0107 301#define MWL8K_CMD_SET_PRE_SCAN 0x0107
282#define MWL8K_CMD_SET_POST_SCAN 0x0108 302#define MWL8K_CMD_SET_POST_SCAN 0x0108
283#define MWL8K_CMD_SET_RF_CHANNEL 0x010a 303#define MWL8K_CMD_SET_RF_CHANNEL 0x010a
@@ -291,8 +311,10 @@ static const u8 mwl8k_rateids[12] = {
291#define MWL8K_CMD_MIMO_CONFIG 0x0125 311#define MWL8K_CMD_MIMO_CONFIG 0x0125
292#define MWL8K_CMD_USE_FIXED_RATE 0x0126 312#define MWL8K_CMD_USE_FIXED_RATE 0x0126
293#define MWL8K_CMD_ENABLE_SNIFFER 0x0150 313#define MWL8K_CMD_ENABLE_SNIFFER 0x0150
294#define MWL8K_CMD_SET_MAC_ADDR 0x0202 314#define MWL8K_CMD_SET_MAC_ADDR 0x0202 /* per-vif */
295#define MWL8K_CMD_SET_RATEADAPT_MODE 0x0203 315#define MWL8K_CMD_SET_RATEADAPT_MODE 0x0203
316#define MWL8K_CMD_BSS_START 0x1100 /* per-vif */
317#define MWL8K_CMD_SET_NEW_STN 0x1111 /* per-vif */
296#define MWL8K_CMD_UPDATE_STADB 0x1123 318#define MWL8K_CMD_UPDATE_STADB 0x1123
297 319
298static const char *mwl8k_cmd_name(u16 cmd, char *buf, int bufsize) 320static const char *mwl8k_cmd_name(u16 cmd, char *buf, int bufsize)
@@ -310,6 +332,7 @@ static const char *mwl8k_cmd_name(u16 cmd, char *buf, int bufsize)
310 MWL8K_CMDNAME(RADIO_CONTROL); 332 MWL8K_CMDNAME(RADIO_CONTROL);
311 MWL8K_CMDNAME(RF_TX_POWER); 333 MWL8K_CMDNAME(RF_TX_POWER);
312 MWL8K_CMDNAME(RF_ANTENNA); 334 MWL8K_CMDNAME(RF_ANTENNA);
335 MWL8K_CMDNAME(SET_BEACON);
313 MWL8K_CMDNAME(SET_PRE_SCAN); 336 MWL8K_CMDNAME(SET_PRE_SCAN);
314 MWL8K_CMDNAME(SET_POST_SCAN); 337 MWL8K_CMDNAME(SET_POST_SCAN);
315 MWL8K_CMDNAME(SET_RF_CHANNEL); 338 MWL8K_CMDNAME(SET_RF_CHANNEL);
@@ -325,6 +348,8 @@ static const char *mwl8k_cmd_name(u16 cmd, char *buf, int bufsize)
325 MWL8K_CMDNAME(ENABLE_SNIFFER); 348 MWL8K_CMDNAME(ENABLE_SNIFFER);
326 MWL8K_CMDNAME(SET_MAC_ADDR); 349 MWL8K_CMDNAME(SET_MAC_ADDR);
327 MWL8K_CMDNAME(SET_RATEADAPT_MODE); 350 MWL8K_CMDNAME(SET_RATEADAPT_MODE);
351 MWL8K_CMDNAME(BSS_START);
352 MWL8K_CMDNAME(SET_NEW_STN);
328 MWL8K_CMDNAME(UPDATE_STADB); 353 MWL8K_CMDNAME(UPDATE_STADB);
329 default: 354 default:
330 snprintf(buf, bufsize, "0x%x", cmd); 355 snprintf(buf, bufsize, "0x%x", cmd);
@@ -355,8 +380,8 @@ static void mwl8k_release_fw(struct firmware **fw)
355 380
356static void mwl8k_release_firmware(struct mwl8k_priv *priv) 381static void mwl8k_release_firmware(struct mwl8k_priv *priv)
357{ 382{
358 mwl8k_release_fw(&priv->fw.ucode); 383 mwl8k_release_fw(&priv->fw_ucode);
359 mwl8k_release_fw(&priv->fw.helper); 384 mwl8k_release_fw(&priv->fw_helper);
360} 385}
361 386
362/* Request fw image */ 387/* Request fw image */
@@ -377,7 +402,7 @@ static int mwl8k_request_firmware(struct mwl8k_priv *priv)
377 int rc; 402 int rc;
378 403
379 if (di->helper_image != NULL) { 404 if (di->helper_image != NULL) {
380 rc = mwl8k_request_fw(priv, di->helper_image, &priv->fw.helper); 405 rc = mwl8k_request_fw(priv, di->helper_image, &priv->fw_helper);
381 if (rc) { 406 if (rc) {
382 printk(KERN_ERR "%s: Error requesting helper " 407 printk(KERN_ERR "%s: Error requesting helper "
383 "firmware file %s\n", pci_name(priv->pdev), 408 "firmware file %s\n", pci_name(priv->pdev),
@@ -386,24 +411,22 @@ static int mwl8k_request_firmware(struct mwl8k_priv *priv)
386 } 411 }
387 } 412 }
388 413
389 rc = mwl8k_request_fw(priv, di->fw_image, &priv->fw.ucode); 414 rc = mwl8k_request_fw(priv, di->fw_image, &priv->fw_ucode);
390 if (rc) { 415 if (rc) {
391 printk(KERN_ERR "%s: Error requesting firmware file %s\n", 416 printk(KERN_ERR "%s: Error requesting firmware file %s\n",
392 pci_name(priv->pdev), di->fw_image); 417 pci_name(priv->pdev), di->fw_image);
393 mwl8k_release_fw(&priv->fw.helper); 418 mwl8k_release_fw(&priv->fw_helper);
394 return rc; 419 return rc;
395 } 420 }
396 421
397 return 0; 422 return 0;
398} 423}
399 424
400MODULE_FIRMWARE("mwl8k/helper_8687.fw");
401MODULE_FIRMWARE("mwl8k/fmimage_8687.fw");
402
403struct mwl8k_cmd_pkt { 425struct mwl8k_cmd_pkt {
404 __le16 code; 426 __le16 code;
405 __le16 length; 427 __le16 length;
406 __le16 seq_num; 428 __u8 seq_num;
429 __u8 macid;
407 __le16 result; 430 __le16 result;
408 char payload[0]; 431 char payload[0];
409} __attribute__((packed)); 432} __attribute__((packed));
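
The 16-bit little-endian sequence number shrinks to a single byte, and the byte freed up becomes macid, the per-interface index later stamped in by mwl8k_post_pervif_cmd(); the packed command header therefore keeps its original size and the result/payload offsets are unchanged. A compile-time check along these lines would document that invariant (it is not part of the driver; the offsets assume the layout shown above):

static inline void example_check_cmd_pkt_layout(void)
{
	/* code(2) + length(2) + seq_num(1) + macid(1) = 6 bytes before result */
	BUILD_BUG_ON(offsetof(struct mwl8k_cmd_pkt, result) != 6);
	BUILD_BUG_ON(offsetof(struct mwl8k_cmd_pkt, payload) != 8);
}
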
@@ -461,6 +484,7 @@ static int mwl8k_load_fw_image(struct mwl8k_priv *priv,
461 484
462 cmd->code = cpu_to_le16(MWL8K_CMD_CODE_DNLD); 485 cmd->code = cpu_to_le16(MWL8K_CMD_CODE_DNLD);
463 cmd->seq_num = 0; 486 cmd->seq_num = 0;
487 cmd->macid = 0;
464 cmd->result = 0; 488 cmd->result = 0;
465 489
466 done = 0; 490 done = 0;
@@ -551,13 +575,12 @@ static int mwl8k_feed_fw_image(struct mwl8k_priv *priv,
551static int mwl8k_load_firmware(struct ieee80211_hw *hw) 575static int mwl8k_load_firmware(struct ieee80211_hw *hw)
552{ 576{
553 struct mwl8k_priv *priv = hw->priv; 577 struct mwl8k_priv *priv = hw->priv;
554 struct firmware *fw = priv->fw.ucode; 578 struct firmware *fw = priv->fw_ucode;
555 struct mwl8k_device_info *di = priv->device_info;
556 int rc; 579 int rc;
557 int loops; 580 int loops;
558 581
559 if (!memcmp(fw->data, "\x01\x00\x00\x00", 4)) { 582 if (!memcmp(fw->data, "\x01\x00\x00\x00", 4)) {
560 struct firmware *helper = priv->fw.helper; 583 struct firmware *helper = priv->fw_helper;
561 584
562 if (helper == NULL) { 585 if (helper == NULL) {
563 printk(KERN_ERR "%s: helper image needed but none " 586 printk(KERN_ERR "%s: helper image needed but none "
@@ -584,10 +607,7 @@ static int mwl8k_load_firmware(struct ieee80211_hw *hw)
584 return rc; 607 return rc;
585 } 608 }
586 609
587 if (di->modes & BIT(NL80211_IFTYPE_AP)) 610 iowrite32(MWL8K_MODE_STA, priv->regs + MWL8K_HIU_GEN_PTR);
588 iowrite32(MWL8K_MODE_AP, priv->regs + MWL8K_HIU_GEN_PTR);
589 else
590 iowrite32(MWL8K_MODE_STA, priv->regs + MWL8K_HIU_GEN_PTR);
591 611
592 loops = 500000; 612 loops = 500000;
593 do { 613 do {
@@ -610,91 +630,6 @@ static int mwl8k_load_firmware(struct ieee80211_hw *hw)
610} 630}
611 631
612 632
613/*
614 * Defines shared between transmission and reception.
615 */
616/* HT control fields for firmware */
617struct ewc_ht_info {
618 __le16 control1;
619 __le16 control2;
620 __le16 control3;
621} __attribute__((packed));
622
623/* Firmware Station database operations */
624#define MWL8K_STA_DB_ADD_ENTRY 0
625#define MWL8K_STA_DB_MODIFY_ENTRY 1
626#define MWL8K_STA_DB_DEL_ENTRY 2
627#define MWL8K_STA_DB_FLUSH 3
628
629/* Peer Entry flags - used to define the type of the peer node */
630#define MWL8K_PEER_TYPE_ACCESSPOINT 2
631
632struct peer_capability_info {
633 /* Peer type - AP vs. STA. */
634 __u8 peer_type;
635
636 /* Basic 802.11 capabilities from assoc resp. */
637 __le16 basic_caps;
638
639 /* Set if peer supports 802.11n high throughput (HT). */
640 __u8 ht_support;
641
642 /* Valid if HT is supported. */
643 __le16 ht_caps;
644 __u8 extended_ht_caps;
645 struct ewc_ht_info ewc_info;
646
647 /* Legacy rate table. Intersection of our rates and peer rates. */
648 __u8 legacy_rates[12];
649
650 /* HT rate table. Intersection of our rates and peer rates. */
651 __u8 ht_rates[16];
652 __u8 pad[16];
653
654 /* If set, interoperability mode, no proprietary extensions. */
655 __u8 interop;
656 __u8 pad2;
657 __u8 station_id;
658 __le16 amsdu_enabled;
659} __attribute__((packed));
660
661/* Inline functions to manipulate QoS field in data descriptor. */
662static inline u16 mwl8k_qos_setbit_eosp(u16 qos)
663{
664 u16 val_mask = 1 << 4;
665
666 /* End of Service Period Bit 4 */
667 return qos | val_mask;
668}
669
670static inline u16 mwl8k_qos_setbit_ack(u16 qos, u8 ack_policy)
671{
672 u16 val_mask = 0x3;
673 u8 shift = 5;
674 u16 qos_mask = ~(val_mask << shift);
675
676 /* Ack Policy Bit 5-6 */
677 return (qos & qos_mask) | ((ack_policy & val_mask) << shift);
678}
679
680static inline u16 mwl8k_qos_setbit_amsdu(u16 qos)
681{
682 u16 val_mask = 1 << 7;
683
684 /* AMSDU present Bit 7 */
685 return qos | val_mask;
686}
687
688static inline u16 mwl8k_qos_setbit_qlen(u16 qos, u8 len)
689{
690 u16 val_mask = 0xff;
691 u8 shift = 8;
692 u16 qos_mask = ~(val_mask << shift);
693
694 /* Queue Length Bits 8-15 */
695 return (qos & qos_mask) | ((len & val_mask) << shift);
696}
697
698/* DMA header used by firmware and hardware. */ 633/* DMA header used by firmware and hardware. */
699struct mwl8k_dma_data { 634struct mwl8k_dma_data {
700 __le16 fwlen; 635 __le16 fwlen;
@@ -761,9 +696,9 @@ static inline void mwl8k_add_dma_header(struct sk_buff *skb)
761 696
762 697
763/* 698/*
764 * Packet reception for 88w8366. 699 * Packet reception for 88w8366 AP firmware.
765 */ 700 */
766struct mwl8k_rxd_8366 { 701struct mwl8k_rxd_8366_ap {
767 __le16 pkt_len; 702 __le16 pkt_len;
768 __u8 sq2; 703 __u8 sq2;
769 __u8 rate; 704 __u8 rate;
@@ -781,23 +716,23 @@ struct mwl8k_rxd_8366 {
781 __u8 rx_ctrl; 716 __u8 rx_ctrl;
782} __attribute__((packed)); 717} __attribute__((packed));
783 718
784#define MWL8K_8366_RATE_INFO_MCS_FORMAT 0x80 719#define MWL8K_8366_AP_RATE_INFO_MCS_FORMAT 0x80
785#define MWL8K_8366_RATE_INFO_40MHZ 0x40 720#define MWL8K_8366_AP_RATE_INFO_40MHZ 0x40
786#define MWL8K_8366_RATE_INFO_RATEID(x) ((x) & 0x3f) 721#define MWL8K_8366_AP_RATE_INFO_RATEID(x) ((x) & 0x3f)
787 722
788#define MWL8K_8366_RX_CTRL_OWNED_BY_HOST 0x80 723#define MWL8K_8366_AP_RX_CTRL_OWNED_BY_HOST 0x80
789 724
790static void mwl8k_rxd_8366_init(void *_rxd, dma_addr_t next_dma_addr) 725static void mwl8k_rxd_8366_ap_init(void *_rxd, dma_addr_t next_dma_addr)
791{ 726{
792 struct mwl8k_rxd_8366 *rxd = _rxd; 727 struct mwl8k_rxd_8366_ap *rxd = _rxd;
793 728
794 rxd->next_rxd_phys_addr = cpu_to_le32(next_dma_addr); 729 rxd->next_rxd_phys_addr = cpu_to_le32(next_dma_addr);
795 rxd->rx_ctrl = MWL8K_8366_RX_CTRL_OWNED_BY_HOST; 730 rxd->rx_ctrl = MWL8K_8366_AP_RX_CTRL_OWNED_BY_HOST;
796} 731}
797 732
798static void mwl8k_rxd_8366_refill(void *_rxd, dma_addr_t addr, int len) 733static void mwl8k_rxd_8366_ap_refill(void *_rxd, dma_addr_t addr, int len)
799{ 734{
800 struct mwl8k_rxd_8366 *rxd = _rxd; 735 struct mwl8k_rxd_8366_ap *rxd = _rxd;
801 736
802 rxd->pkt_len = cpu_to_le16(len); 737 rxd->pkt_len = cpu_to_le16(len);
803 rxd->pkt_phys_addr = cpu_to_le32(addr); 738 rxd->pkt_phys_addr = cpu_to_le32(addr);
@@ -806,12 +741,12 @@ static void mwl8k_rxd_8366_refill(void *_rxd, dma_addr_t addr, int len)
806} 741}
807 742
808static int 743static int
809mwl8k_rxd_8366_process(void *_rxd, struct ieee80211_rx_status *status, 744mwl8k_rxd_8366_ap_process(void *_rxd, struct ieee80211_rx_status *status,
810 __le16 *qos) 745 __le16 *qos)
811{ 746{
812 struct mwl8k_rxd_8366 *rxd = _rxd; 747 struct mwl8k_rxd_8366_ap *rxd = _rxd;
813 748
814 if (!(rxd->rx_ctrl & MWL8K_8366_RX_CTRL_OWNED_BY_HOST)) 749 if (!(rxd->rx_ctrl & MWL8K_8366_AP_RX_CTRL_OWNED_BY_HOST))
815 return -1; 750 return -1;
816 rmb(); 751 rmb();
817 752
@@ -820,23 +755,29 @@ mwl8k_rxd_8366_process(void *_rxd, struct ieee80211_rx_status *status,
820 status->signal = -rxd->rssi; 755 status->signal = -rxd->rssi;
821 status->noise = -rxd->noise_floor; 756 status->noise = -rxd->noise_floor;
822 757
823 if (rxd->rate & MWL8K_8366_RATE_INFO_MCS_FORMAT) { 758 if (rxd->rate & MWL8K_8366_AP_RATE_INFO_MCS_FORMAT) {
824 status->flag |= RX_FLAG_HT; 759 status->flag |= RX_FLAG_HT;
825 if (rxd->rate & MWL8K_8366_RATE_INFO_40MHZ) 760 if (rxd->rate & MWL8K_8366_AP_RATE_INFO_40MHZ)
826 status->flag |= RX_FLAG_40MHZ; 761 status->flag |= RX_FLAG_40MHZ;
827 status->rate_idx = MWL8K_8366_RATE_INFO_RATEID(rxd->rate); 762 status->rate_idx = MWL8K_8366_AP_RATE_INFO_RATEID(rxd->rate);
828 } else { 763 } else {
829 int i; 764 int i;
830 765
831 for (i = 0; i < ARRAY_SIZE(mwl8k_rates); i++) { 766 for (i = 0; i < ARRAY_SIZE(mwl8k_rates_24); i++) {
832 if (mwl8k_rates[i].hw_value == rxd->rate) { 767 if (mwl8k_rates_24[i].hw_value == rxd->rate) {
833 status->rate_idx = i; 768 status->rate_idx = i;
834 break; 769 break;
835 } 770 }
836 } 771 }
837 } 772 }
838 773
839 status->band = IEEE80211_BAND_2GHZ; 774 if (rxd->channel > 14) {
775 status->band = IEEE80211_BAND_5GHZ;
776 if (!(status->flag & RX_FLAG_HT))
777 status->rate_idx -= 5;
778 } else {
779 status->band = IEEE80211_BAND_2GHZ;
780 }
840 status->freq = ieee80211_channel_to_frequency(rxd->channel); 781 status->freq = ieee80211_channel_to_frequency(rxd->channel);
841 782
842 *qos = rxd->qos_control; 783 *qos = rxd->qos_control;
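
The rate_idx -= 5 adjustment above relies on the shape of the two rate tables introduced earlier in this diff: the first five entries of mwl8k_rates_24[] are 2.4 GHz-only legacy rates with no counterpart in mwl8k_rates_50[], after which the OFDM rates line up one-for-one. A sketch of that index mapping (illustrative helper, not driver code):

static int example_legacy_rate_idx(int idx_in_24ghz_table, bool on_5ghz_band)
{
	/* Skip the five 2.4 GHz-only entries when indexing the 5 GHz table. */
	return on_5ghz_band ? idx_in_24ghz_table - 5 : idx_in_24ghz_table;
}
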
@@ -844,17 +785,17 @@ mwl8k_rxd_8366_process(void *_rxd, struct ieee80211_rx_status *status,
844 return le16_to_cpu(rxd->pkt_len); 785 return le16_to_cpu(rxd->pkt_len);
845} 786}
846 787
847static struct rxd_ops rxd_8366_ops = { 788static struct rxd_ops rxd_8366_ap_ops = {
848 .rxd_size = sizeof(struct mwl8k_rxd_8366), 789 .rxd_size = sizeof(struct mwl8k_rxd_8366_ap),
849 .rxd_init = mwl8k_rxd_8366_init, 790 .rxd_init = mwl8k_rxd_8366_ap_init,
850 .rxd_refill = mwl8k_rxd_8366_refill, 791 .rxd_refill = mwl8k_rxd_8366_ap_refill,
851 .rxd_process = mwl8k_rxd_8366_process, 792 .rxd_process = mwl8k_rxd_8366_ap_process,
852}; 793};
853 794
854/* 795/*
855 * Packet reception for 88w8687. 796 * Packet reception for STA firmware.
856 */ 797 */
857struct mwl8k_rxd_8687 { 798struct mwl8k_rxd_sta {
858 __le16 pkt_len; 799 __le16 pkt_len;
859 __u8 link_quality; 800 __u8 link_quality;
860 __u8 noise_level; 801 __u8 noise_level;
@@ -871,26 +812,26 @@ struct mwl8k_rxd_8687 {
871 __u8 pad2[2]; 812 __u8 pad2[2];
872} __attribute__((packed)); 813} __attribute__((packed));
873 814
874#define MWL8K_8687_RATE_INFO_SHORTPRE 0x8000 815#define MWL8K_STA_RATE_INFO_SHORTPRE 0x8000
875#define MWL8K_8687_RATE_INFO_ANTSELECT(x) (((x) >> 11) & 0x3) 816#define MWL8K_STA_RATE_INFO_ANTSELECT(x) (((x) >> 11) & 0x3)
876#define MWL8K_8687_RATE_INFO_RATEID(x) (((x) >> 3) & 0x3f) 817#define MWL8K_STA_RATE_INFO_RATEID(x) (((x) >> 3) & 0x3f)
877#define MWL8K_8687_RATE_INFO_40MHZ 0x0004 818#define MWL8K_STA_RATE_INFO_40MHZ 0x0004
878#define MWL8K_8687_RATE_INFO_SHORTGI 0x0002 819#define MWL8K_STA_RATE_INFO_SHORTGI 0x0002
879#define MWL8K_8687_RATE_INFO_MCS_FORMAT 0x0001 820#define MWL8K_STA_RATE_INFO_MCS_FORMAT 0x0001
880 821
881#define MWL8K_8687_RX_CTRL_OWNED_BY_HOST 0x02 822#define MWL8K_STA_RX_CTRL_OWNED_BY_HOST 0x02
882 823
883static void mwl8k_rxd_8687_init(void *_rxd, dma_addr_t next_dma_addr) 824static void mwl8k_rxd_sta_init(void *_rxd, dma_addr_t next_dma_addr)
884{ 825{
885 struct mwl8k_rxd_8687 *rxd = _rxd; 826 struct mwl8k_rxd_sta *rxd = _rxd;
886 827
887 rxd->next_rxd_phys_addr = cpu_to_le32(next_dma_addr); 828 rxd->next_rxd_phys_addr = cpu_to_le32(next_dma_addr);
888 rxd->rx_ctrl = MWL8K_8687_RX_CTRL_OWNED_BY_HOST; 829 rxd->rx_ctrl = MWL8K_STA_RX_CTRL_OWNED_BY_HOST;
889} 830}
890 831
891static void mwl8k_rxd_8687_refill(void *_rxd, dma_addr_t addr, int len) 832static void mwl8k_rxd_sta_refill(void *_rxd, dma_addr_t addr, int len)
892{ 833{
893 struct mwl8k_rxd_8687 *rxd = _rxd; 834 struct mwl8k_rxd_sta *rxd = _rxd;
894 835
895 rxd->pkt_len = cpu_to_le16(len); 836 rxd->pkt_len = cpu_to_le16(len);
896 rxd->pkt_phys_addr = cpu_to_le32(addr); 837 rxd->pkt_phys_addr = cpu_to_le32(addr);
@@ -899,13 +840,13 @@ static void mwl8k_rxd_8687_refill(void *_rxd, dma_addr_t addr, int len)
899} 840}
900 841
901static int 842static int
902mwl8k_rxd_8687_process(void *_rxd, struct ieee80211_rx_status *status, 843mwl8k_rxd_sta_process(void *_rxd, struct ieee80211_rx_status *status,
903 __le16 *qos) 844 __le16 *qos)
904{ 845{
905 struct mwl8k_rxd_8687 *rxd = _rxd; 846 struct mwl8k_rxd_sta *rxd = _rxd;
906 u16 rate_info; 847 u16 rate_info;
907 848
908 if (!(rxd->rx_ctrl & MWL8K_8687_RX_CTRL_OWNED_BY_HOST)) 849 if (!(rxd->rx_ctrl & MWL8K_STA_RX_CTRL_OWNED_BY_HOST))
909 return -1; 850 return -1;
910 rmb(); 851 rmb();
911 852
@@ -915,19 +856,25 @@ mwl8k_rxd_8687_process(void *_rxd, struct ieee80211_rx_status *status,
915 856
916 status->signal = -rxd->rssi; 857 status->signal = -rxd->rssi;
917 status->noise = -rxd->noise_level; 858 status->noise = -rxd->noise_level;
918 status->antenna = MWL8K_8687_RATE_INFO_ANTSELECT(rate_info); 859 status->antenna = MWL8K_STA_RATE_INFO_ANTSELECT(rate_info);
919 status->rate_idx = MWL8K_8687_RATE_INFO_RATEID(rate_info); 860 status->rate_idx = MWL8K_STA_RATE_INFO_RATEID(rate_info);
920 861
921 if (rate_info & MWL8K_8687_RATE_INFO_SHORTPRE) 862 if (rate_info & MWL8K_STA_RATE_INFO_SHORTPRE)
922 status->flag |= RX_FLAG_SHORTPRE; 863 status->flag |= RX_FLAG_SHORTPRE;
923 if (rate_info & MWL8K_8687_RATE_INFO_40MHZ) 864 if (rate_info & MWL8K_STA_RATE_INFO_40MHZ)
924 status->flag |= RX_FLAG_40MHZ; 865 status->flag |= RX_FLAG_40MHZ;
925 if (rate_info & MWL8K_8687_RATE_INFO_SHORTGI) 866 if (rate_info & MWL8K_STA_RATE_INFO_SHORTGI)
926 status->flag |= RX_FLAG_SHORT_GI; 867 status->flag |= RX_FLAG_SHORT_GI;
927 if (rate_info & MWL8K_8687_RATE_INFO_MCS_FORMAT) 868 if (rate_info & MWL8K_STA_RATE_INFO_MCS_FORMAT)
928 status->flag |= RX_FLAG_HT; 869 status->flag |= RX_FLAG_HT;
929 870
930 status->band = IEEE80211_BAND_2GHZ; 871 if (rxd->channel > 14) {
872 status->band = IEEE80211_BAND_5GHZ;
873 if (!(status->flag & RX_FLAG_HT))
874 status->rate_idx -= 5;
875 } else {
876 status->band = IEEE80211_BAND_2GHZ;
877 }
931 status->freq = ieee80211_channel_to_frequency(rxd->channel); 878 status->freq = ieee80211_channel_to_frequency(rxd->channel);
932 879
933 *qos = rxd->qos_control; 880 *qos = rxd->qos_control;
@@ -935,11 +882,11 @@ mwl8k_rxd_8687_process(void *_rxd, struct ieee80211_rx_status *status,
935 return le16_to_cpu(rxd->pkt_len); 882 return le16_to_cpu(rxd->pkt_len);
936} 883}
937 884
938static struct rxd_ops rxd_8687_ops = { 885static struct rxd_ops rxd_sta_ops = {
939 .rxd_size = sizeof(struct mwl8k_rxd_8687), 886 .rxd_size = sizeof(struct mwl8k_rxd_sta),
940 .rxd_init = mwl8k_rxd_8687_init, 887 .rxd_init = mwl8k_rxd_sta_init,
941 .rxd_refill = mwl8k_rxd_8687_refill, 888 .rxd_refill = mwl8k_rxd_sta_refill,
942 .rxd_process = mwl8k_rxd_8687_process, 889 .rxd_process = mwl8k_rxd_sta_process,
943}; 890};
944 891
945 892
@@ -1153,16 +1100,18 @@ static int rxq_process(struct ieee80211_hw *hw, int index, int limit)
1153 * Packet transmission. 1100 * Packet transmission.
1154 */ 1101 */
1155 1102
1156/* Transmit packet ACK policy */
1157#define MWL8K_TXD_ACK_POLICY_NORMAL 0
1158#define MWL8K_TXD_ACK_POLICY_BLOCKACK 3
1159
1160#define MWL8K_TXD_STATUS_OK 0x00000001 1103#define MWL8K_TXD_STATUS_OK 0x00000001
1161#define MWL8K_TXD_STATUS_OK_RETRY 0x00000002 1104#define MWL8K_TXD_STATUS_OK_RETRY 0x00000002
1162#define MWL8K_TXD_STATUS_OK_MORE_RETRY 0x00000004 1105#define MWL8K_TXD_STATUS_OK_MORE_RETRY 0x00000004
1163#define MWL8K_TXD_STATUS_MULTICAST_TX 0x00000008 1106#define MWL8K_TXD_STATUS_MULTICAST_TX 0x00000008
1164#define MWL8K_TXD_STATUS_FW_OWNED 0x80000000 1107#define MWL8K_TXD_STATUS_FW_OWNED 0x80000000
1165 1108
1109#define MWL8K_QOS_QLEN_UNSPEC 0xff00
1110#define MWL8K_QOS_ACK_POLICY_MASK 0x0060
1111#define MWL8K_QOS_ACK_POLICY_NORMAL 0x0000
1112#define MWL8K_QOS_ACK_POLICY_BLOCKACK 0x0060
1113#define MWL8K_QOS_EOSP 0x0010
1114
1166struct mwl8k_tx_desc { 1115struct mwl8k_tx_desc {
1167 __le32 status; 1116 __le32 status;
1168 __u8 data_rate; 1117 __u8 data_rate;
@@ -1272,7 +1221,7 @@ static void mwl8k_dump_tx_rings(struct ieee80211_hw *hw)
1272/* 1221/*
1273 * Must be called with priv->fw_mutex held and tx queues stopped. 1222 * Must be called with priv->fw_mutex held and tx queues stopped.
1274 */ 1223 */
1275#define MWL8K_TX_WAIT_TIMEOUT_MS 1000 1224#define MWL8K_TX_WAIT_TIMEOUT_MS 5000
1276 1225
1277static int mwl8k_tx_wait_empty(struct ieee80211_hw *hw) 1226static int mwl8k_tx_wait_empty(struct ieee80211_hw *hw)
1278{ 1227{
@@ -1316,8 +1265,8 @@ static int mwl8k_tx_wait_empty(struct ieee80211_hw *hw)
1316 } 1265 }
1317 1266
1318 if (priv->pending_tx_pkts < oldcount) { 1267 if (priv->pending_tx_pkts < oldcount) {
1319 printk(KERN_NOTICE "%s: timeout waiting for tx " 1268 printk(KERN_NOTICE "%s: waiting for tx rings "
1320 "rings to drain (%d -> %d pkts), retrying\n", 1269 "to drain (%d -> %d pkts)\n",
1321 wiphy_name(hw->wiphy), oldcount, 1270 wiphy_name(hw->wiphy), oldcount,
1322 priv->pending_tx_pkts); 1271 priv->pending_tx_pkts);
1323 retry = 1; 1272 retry = 1;
@@ -1342,13 +1291,15 @@ static int mwl8k_tx_wait_empty(struct ieee80211_hw *hw)
1342 MWL8K_TXD_STATUS_OK_RETRY | \ 1291 MWL8K_TXD_STATUS_OK_RETRY | \
1343 MWL8K_TXD_STATUS_OK_MORE_RETRY)) 1292 MWL8K_TXD_STATUS_OK_MORE_RETRY))
1344 1293
1345static void mwl8k_txq_reclaim(struct ieee80211_hw *hw, int index, int force) 1294static int
1295mwl8k_txq_reclaim(struct ieee80211_hw *hw, int index, int limit, int force)
1346{ 1296{
1347 struct mwl8k_priv *priv = hw->priv; 1297 struct mwl8k_priv *priv = hw->priv;
1348 struct mwl8k_tx_queue *txq = priv->txq + index; 1298 struct mwl8k_tx_queue *txq = priv->txq + index;
1349 int wake = 0; 1299 int processed;
1350 1300
1351 while (txq->stats.len > 0) { 1301 processed = 0;
1302 while (txq->stats.len > 0 && limit--) {
1352 int tx; 1303 int tx;
1353 struct mwl8k_tx_desc *tx_desc; 1304 struct mwl8k_tx_desc *tx_desc;
1354 unsigned long addr; 1305 unsigned long addr;
@@ -1395,11 +1346,13 @@ static void mwl8k_txq_reclaim(struct ieee80211_hw *hw, int index, int force)
1395 1346
1396 ieee80211_tx_status_irqsafe(hw, skb); 1347 ieee80211_tx_status_irqsafe(hw, skb);
1397 1348
1398 wake = 1; 1349 processed++;
1399 } 1350 }
1400 1351
1401 if (wake && priv->radio_on && !mutex_is_locked(&priv->fw_mutex)) 1352 if (processed && priv->radio_on && !mutex_is_locked(&priv->fw_mutex))
1402 ieee80211_wake_queue(hw, index); 1353 ieee80211_wake_queue(hw, index);
1354
1355 return processed;
1403} 1356}
1404 1357
1405/* must be called only when the card's transmit is completely halted */ 1358/* must be called only when the card's transmit is completely halted */
@@ -1408,7 +1361,7 @@ static void mwl8k_txq_deinit(struct ieee80211_hw *hw, int index)
1408 struct mwl8k_priv *priv = hw->priv; 1361 struct mwl8k_priv *priv = hw->priv;
1409 struct mwl8k_tx_queue *txq = priv->txq + index; 1362 struct mwl8k_tx_queue *txq = priv->txq + index;
1410 1363
1411 mwl8k_txq_reclaim(hw, index, 1); 1364 mwl8k_txq_reclaim(hw, index, INT_MAX, 1);
1412 1365
1413 kfree(txq->skb); 1366 kfree(txq->skb);
1414 txq->skb = NULL; 1367 txq->skb = NULL;
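
mwl8k_txq_reclaim() now takes a batch limit and reports how many descriptors it processed, which is what the new poll_tx_task tasklet in mwl8k_priv needs to bound its work per run; the unlimited INT_MAX call above is reserved for teardown. The tasklet body itself is outside the hunks shown here, so the sketch below is only a plausible shape for it (budget value and function name are assumptions):

#define EXAMPLE_TX_POLL_BUDGET	32

static void example_poll_tx(unsigned long data)
{
	struct ieee80211_hw *hw = (struct ieee80211_hw *)data;
	struct mwl8k_priv *priv = hw->priv;
	int i, processed = 0;

	for (i = 0; i < MWL8K_TX_QUEUES; i++)
		processed += mwl8k_txq_reclaim(hw, i, EXAMPLE_TX_POLL_BUDGET, 0);

	/* Hit the budget?  Leave the rest for the next scheduling pass. */
	if (processed >= EXAMPLE_TX_POLL_BUDGET)
		tasklet_schedule(&priv->poll_tx_task);
}
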
@@ -1446,11 +1399,9 @@ mwl8k_txq_xmit(struct ieee80211_hw *hw, int index, struct sk_buff *skb)
1446 mwl8k_vif = MWL8K_VIF(tx_info->control.vif); 1399 mwl8k_vif = MWL8K_VIF(tx_info->control.vif);
1447 1400
1448 if (tx_info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) { 1401 if (tx_info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) {
1449 u16 seqno = mwl8k_vif->seqno;
1450
1451 wh->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG); 1402 wh->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
1452 wh->seq_ctrl |= cpu_to_le16(seqno << 4); 1403 wh->seq_ctrl |= cpu_to_le16(mwl8k_vif->seqno);
1453 mwl8k_vif->seqno = seqno++ % 4096; 1404 mwl8k_vif->seqno += 0x10;
1454 } 1405 }
1455 1406
1456 /* Setup firmware control bit fields for each frame type. */ 1407 /* Setup firmware control bit fields for each frame type. */
@@ -1459,24 +1410,17 @@ mwl8k_txq_xmit(struct ieee80211_hw *hw, int index, struct sk_buff *skb)
1459 if (ieee80211_is_mgmt(wh->frame_control) || 1410 if (ieee80211_is_mgmt(wh->frame_control) ||
1460 ieee80211_is_ctl(wh->frame_control)) { 1411 ieee80211_is_ctl(wh->frame_control)) {
1461 txdatarate = 0; 1412 txdatarate = 0;
1462 qos = mwl8k_qos_setbit_eosp(qos); 1413 qos |= MWL8K_QOS_QLEN_UNSPEC | MWL8K_QOS_EOSP;
1463 /* Set Queue size to unspecified */
1464 qos = mwl8k_qos_setbit_qlen(qos, 0xff);
1465 } else if (ieee80211_is_data(wh->frame_control)) { 1414 } else if (ieee80211_is_data(wh->frame_control)) {
1466 txdatarate = 1; 1415 txdatarate = 1;
1467 if (is_multicast_ether_addr(wh->addr1)) 1416 if (is_multicast_ether_addr(wh->addr1))
1468 txstatus |= MWL8K_TXD_STATUS_MULTICAST_TX; 1417 txstatus |= MWL8K_TXD_STATUS_MULTICAST_TX;
1469 1418
1470 /* Send pkt in an aggregate if AMPDU frame. */ 1419 qos &= ~MWL8K_QOS_ACK_POLICY_MASK;
1471 if (tx_info->flags & IEEE80211_TX_CTL_AMPDU) 1420 if (tx_info->flags & IEEE80211_TX_CTL_AMPDU)
1472 qos = mwl8k_qos_setbit_ack(qos, 1421 qos |= MWL8K_QOS_ACK_POLICY_BLOCKACK;
1473 MWL8K_TXD_ACK_POLICY_BLOCKACK);
1474 else 1422 else
1475 qos = mwl8k_qos_setbit_ack(qos, 1423 qos |= MWL8K_QOS_ACK_POLICY_NORMAL;
1476 MWL8K_TXD_ACK_POLICY_NORMAL);
1477
1478 if (qos & IEEE80211_QOS_CONTROL_A_MSDU_PRESENT)
1479 qos = mwl8k_qos_setbit_amsdu(qos);
1480 } 1424 }
1481 1425
1482 dma = pci_map_single(priv->pdev, skb->data, 1426 dma = pci_map_single(priv->pdev, skb->data,
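
The removed mwl8k_qos_setbit_*() helpers and the new flat MWL8K_QOS_* masks express the same QoS-control bit fields: EOSP is bit 4, the ack policy occupies bits 5-6 and the queue-length byte bits 8-15. The data-frame branch above reduces to the following (a stand-alone restatement of the new hunk, shown only for clarity):

static u16 example_data_frame_qos(u16 qos, bool ampdu)
{
	qos &= ~MWL8K_QOS_ACK_POLICY_MASK;		/* clear bits 5-6 */
	if (ampdu)
		qos |= MWL8K_QOS_ACK_POLICY_BLOCKACK;	/* block-ack policy */
	else
		qos |= MWL8K_QOS_ACK_POLICY_NORMAL;	/* normal ack */
	return qos;
}
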
@@ -1503,7 +1447,10 @@ mwl8k_txq_xmit(struct ieee80211_hw *hw, int index, struct sk_buff *skb)
1503 tx->pkt_phys_addr = cpu_to_le32(dma); 1447 tx->pkt_phys_addr = cpu_to_le32(dma);
1504 tx->pkt_len = cpu_to_le16(skb->len); 1448 tx->pkt_len = cpu_to_le16(skb->len);
1505 tx->rate_info = 0; 1449 tx->rate_info = 0;
1506 tx->peer_id = mwl8k_vif->peer_id; 1450 if (!priv->ap_fw && tx_info->control.sta != NULL)
1451 tx->peer_id = MWL8K_STA(tx_info->control.sta)->peer_id;
1452 else
1453 tx->peer_id = 0;
1507 wmb(); 1454 wmb();
1508 tx->status = cpu_to_le32(MWL8K_TXD_STATUS_FW_OWNED | txstatus); 1455 tx->status = cpu_to_le32(MWL8K_TXD_STATUS_FW_OWNED | txstatus);
1509 1456
@@ -1656,6 +1603,56 @@ static int mwl8k_post_cmd(struct ieee80211_hw *hw, struct mwl8k_cmd_pkt *cmd)
1656 return rc; 1603 return rc;
1657} 1604}
1658 1605
1606static int mwl8k_post_pervif_cmd(struct ieee80211_hw *hw,
1607 struct ieee80211_vif *vif,
1608 struct mwl8k_cmd_pkt *cmd)
1609{
1610 if (vif != NULL)
1611 cmd->macid = MWL8K_VIF(vif)->macid;
1612 return mwl8k_post_cmd(hw, cmd);
1613}
1614
1615/*
1616 * Setup code shared between STA and AP firmware images.
1617 */
1618static void mwl8k_setup_2ghz_band(struct ieee80211_hw *hw)
1619{
1620 struct mwl8k_priv *priv = hw->priv;
1621
1622 BUILD_BUG_ON(sizeof(priv->channels_24) != sizeof(mwl8k_channels_24));
1623 memcpy(priv->channels_24, mwl8k_channels_24, sizeof(mwl8k_channels_24));
1624
1625 BUILD_BUG_ON(sizeof(priv->rates_24) != sizeof(mwl8k_rates_24));
1626 memcpy(priv->rates_24, mwl8k_rates_24, sizeof(mwl8k_rates_24));
1627
1628 priv->band_24.band = IEEE80211_BAND_2GHZ;
1629 priv->band_24.channels = priv->channels_24;
1630 priv->band_24.n_channels = ARRAY_SIZE(mwl8k_channels_24);
1631 priv->band_24.bitrates = priv->rates_24;
1632 priv->band_24.n_bitrates = ARRAY_SIZE(mwl8k_rates_24);
1633
1634 hw->wiphy->bands[IEEE80211_BAND_2GHZ] = &priv->band_24;
1635}
1636
1637static void mwl8k_setup_5ghz_band(struct ieee80211_hw *hw)
1638{
1639 struct mwl8k_priv *priv = hw->priv;
1640
1641 BUILD_BUG_ON(sizeof(priv->channels_50) != sizeof(mwl8k_channels_50));
1642 memcpy(priv->channels_50, mwl8k_channels_50, sizeof(mwl8k_channels_50));
1643
1644 BUILD_BUG_ON(sizeof(priv->rates_50) != sizeof(mwl8k_rates_50));
1645 memcpy(priv->rates_50, mwl8k_rates_50, sizeof(mwl8k_rates_50));
1646
1647 priv->band_50.band = IEEE80211_BAND_5GHZ;
1648 priv->band_50.channels = priv->channels_50;
1649 priv->band_50.n_channels = ARRAY_SIZE(mwl8k_channels_50);
1650 priv->band_50.bitrates = priv->rates_50;
1651 priv->band_50.n_bitrates = ARRAY_SIZE(mwl8k_rates_50);
1652
1653 hw->wiphy->bands[IEEE80211_BAND_5GHZ] = &priv->band_50;
1654}
1655
1659/* 1656/*
1660 * CMD_GET_HW_SPEC (STA version). 1657 * CMD_GET_HW_SPEC (STA version).
1661 */ 1658 */
@@ -1678,6 +1675,89 @@ struct mwl8k_cmd_get_hw_spec_sta {
1678 __le32 total_rxd; 1675 __le32 total_rxd;
1679} __attribute__((packed)); 1676} __attribute__((packed));
1680 1677
1678#define MWL8K_CAP_MAX_AMSDU 0x20000000
1679#define MWL8K_CAP_GREENFIELD 0x08000000
1680#define MWL8K_CAP_AMPDU 0x04000000
1681#define MWL8K_CAP_RX_STBC 0x01000000
1682#define MWL8K_CAP_TX_STBC 0x00800000
1683#define MWL8K_CAP_SHORTGI_40MHZ 0x00400000
1684#define MWL8K_CAP_SHORTGI_20MHZ 0x00200000
1685#define MWL8K_CAP_RX_ANTENNA_MASK 0x000e0000
1686#define MWL8K_CAP_TX_ANTENNA_MASK 0x0001c000
1687#define MWL8K_CAP_DELAY_BA 0x00003000
1688#define MWL8K_CAP_MIMO 0x00000200
1689#define MWL8K_CAP_40MHZ 0x00000100
1690#define MWL8K_CAP_BAND_MASK 0x00000007
1691#define MWL8K_CAP_5GHZ 0x00000004
1692#define MWL8K_CAP_2GHZ4 0x00000001
1693
1694static void
1695mwl8k_set_ht_caps(struct ieee80211_hw *hw,
1696 struct ieee80211_supported_band *band, u32 cap)
1697{
1698 int rx_streams;
1699 int tx_streams;
1700
1701 band->ht_cap.ht_supported = 1;
1702
1703 if (cap & MWL8K_CAP_MAX_AMSDU)
1704 band->ht_cap.cap |= IEEE80211_HT_CAP_MAX_AMSDU;
1705 if (cap & MWL8K_CAP_GREENFIELD)
1706 band->ht_cap.cap |= IEEE80211_HT_CAP_GRN_FLD;
1707 if (cap & MWL8K_CAP_AMPDU) {
1708 hw->flags |= IEEE80211_HW_AMPDU_AGGREGATION;
1709 band->ht_cap.ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K;
1710 band->ht_cap.ampdu_density = IEEE80211_HT_MPDU_DENSITY_NONE;
1711 }
1712 if (cap & MWL8K_CAP_RX_STBC)
1713 band->ht_cap.cap |= IEEE80211_HT_CAP_RX_STBC;
1714 if (cap & MWL8K_CAP_TX_STBC)
1715 band->ht_cap.cap |= IEEE80211_HT_CAP_TX_STBC;
1716 if (cap & MWL8K_CAP_SHORTGI_40MHZ)
1717 band->ht_cap.cap |= IEEE80211_HT_CAP_SGI_40;
1718 if (cap & MWL8K_CAP_SHORTGI_20MHZ)
1719 band->ht_cap.cap |= IEEE80211_HT_CAP_SGI_20;
1720 if (cap & MWL8K_CAP_DELAY_BA)
1721 band->ht_cap.cap |= IEEE80211_HT_CAP_DELAY_BA;
1722 if (cap & MWL8K_CAP_40MHZ)
1723 band->ht_cap.cap |= IEEE80211_HT_CAP_SUP_WIDTH_20_40;
1724
1725 rx_streams = hweight32(cap & MWL8K_CAP_RX_ANTENNA_MASK);
1726 tx_streams = hweight32(cap & MWL8K_CAP_TX_ANTENNA_MASK);
1727
1728 band->ht_cap.mcs.rx_mask[0] = 0xff;
1729 if (rx_streams >= 2)
1730 band->ht_cap.mcs.rx_mask[1] = 0xff;
1731 if (rx_streams >= 3)
1732 band->ht_cap.mcs.rx_mask[2] = 0xff;
1733 band->ht_cap.mcs.rx_mask[4] = 0x01;
1734 band->ht_cap.mcs.tx_params = IEEE80211_HT_MCS_TX_DEFINED;
1735
1736 if (rx_streams != tx_streams) {
1737 band->ht_cap.mcs.tx_params |= IEEE80211_HT_MCS_TX_RX_DIFF;
1738 band->ht_cap.mcs.tx_params |= (tx_streams - 1) <<
1739 IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT;
1740 }
1741}
1742
1743static void
1744mwl8k_set_caps(struct ieee80211_hw *hw, u32 caps)
1745{
1746 struct mwl8k_priv *priv = hw->priv;
1747
1748 if ((caps & MWL8K_CAP_2GHZ4) || !(caps & MWL8K_CAP_BAND_MASK)) {
1749 mwl8k_setup_2ghz_band(hw);
1750 if (caps & MWL8K_CAP_MIMO)
1751 mwl8k_set_ht_caps(hw, &priv->band_24, caps);
1752 }
1753
1754 if (caps & MWL8K_CAP_5GHZ) {
1755 mwl8k_setup_5ghz_band(hw);
1756 if (caps & MWL8K_CAP_MIMO)
1757 mwl8k_set_ht_caps(hw, &priv->band_50, caps);
1758 }
1759}
1760
1681static int mwl8k_cmd_get_hw_spec_sta(struct ieee80211_hw *hw) 1761static int mwl8k_cmd_get_hw_spec_sta(struct ieee80211_hw *hw)
1682{ 1762{
1683 struct mwl8k_priv *priv = hw->priv; 1763 struct mwl8k_priv *priv = hw->priv;
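
In mwl8k_set_ht_caps() above, rx_mask[] is a per-eight-MCS bitmap: bytes 0-2 set to 0xff advertise MCS 0-23 for up to three receive streams, and bit 0 of rx_mask[4] is MCS 32, the 40 MHz duplicate format. A small helper makes that indexing explicit (illustrative only, not part of the driver):

static bool example_mcs_supported(const struct ieee80211_mcs_info *mcs, int index)
{
	/* Each byte of rx_mask covers eight consecutive MCS indexes. */
	return mcs->rx_mask[index / 8] & (1 << (index % 8));
}
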
@@ -1708,6 +1788,9 @@ static int mwl8k_cmd_get_hw_spec_sta(struct ieee80211_hw *hw)
1708 priv->num_mcaddrs = le16_to_cpu(cmd->num_mcaddrs); 1788 priv->num_mcaddrs = le16_to_cpu(cmd->num_mcaddrs);
1709 priv->fw_rev = le32_to_cpu(cmd->fw_rev); 1789 priv->fw_rev = le32_to_cpu(cmd->fw_rev);
1710 priv->hw_rev = cmd->hw_rev; 1790 priv->hw_rev = cmd->hw_rev;
1791 mwl8k_set_caps(hw, le32_to_cpu(cmd->caps));
1792 priv->ap_macids_supported = 0x00000000;
1793 priv->sta_macids_supported = 0x00000001;
1711 } 1794 }
1712 1795
1713 kfree(cmd); 1796 kfree(cmd);
@@ -1761,6 +1844,9 @@ static int mwl8k_cmd_get_hw_spec_ap(struct ieee80211_hw *hw)
1761 priv->num_mcaddrs = le16_to_cpu(cmd->num_mcaddrs); 1844 priv->num_mcaddrs = le16_to_cpu(cmd->num_mcaddrs);
1762 priv->fw_rev = le32_to_cpu(cmd->fw_rev); 1845 priv->fw_rev = le32_to_cpu(cmd->fw_rev);
1763 priv->hw_rev = cmd->hw_rev; 1846 priv->hw_rev = cmd->hw_rev;
1847 mwl8k_setup_2ghz_band(hw);
1848 priv->ap_macids_supported = 0x000000ff;
1849 priv->sta_macids_supported = 0x00000000;
1764 1850
1765 off = le32_to_cpu(cmd->wcbbase0) & 0xffff; 1851 off = le32_to_cpu(cmd->wcbbase0) & 0xffff;
1766 iowrite32(cpu_to_le32(priv->txq[0].txd_dma), priv->sram + off); 1852 iowrite32(cpu_to_le32(priv->txq[0].txd_dma), priv->sram + off);
@@ -1806,7 +1892,9 @@ struct mwl8k_cmd_set_hw_spec {
1806 __le32 total_rxd; 1892 __le32 total_rxd;
1807} __attribute__((packed)); 1893} __attribute__((packed));
1808 1894
1809#define MWL8K_SET_HW_SPEC_FLAG_HOST_DECR_MGMT 0x00000080 1895#define MWL8K_SET_HW_SPEC_FLAG_HOST_DECR_MGMT 0x00000080
1896#define MWL8K_SET_HW_SPEC_FLAG_HOSTFORM_PROBERESP 0x00000020
1897#define MWL8K_SET_HW_SPEC_FLAG_HOSTFORM_BEACON 0x00000010
1810 1898
1811static int mwl8k_cmd_set_hw_spec(struct ieee80211_hw *hw) 1899static int mwl8k_cmd_set_hw_spec(struct ieee80211_hw *hw)
1812{ 1900{
@@ -1827,7 +1915,9 @@ static int mwl8k_cmd_set_hw_spec(struct ieee80211_hw *hw)
1827 cmd->num_tx_queues = cpu_to_le32(MWL8K_TX_QUEUES); 1915 cmd->num_tx_queues = cpu_to_le32(MWL8K_TX_QUEUES);
1828 for (i = 0; i < MWL8K_TX_QUEUES; i++) 1916 for (i = 0; i < MWL8K_TX_QUEUES; i++)
1829 cmd->tx_queue_ptrs[i] = cpu_to_le32(priv->txq[i].txd_dma); 1917 cmd->tx_queue_ptrs[i] = cpu_to_le32(priv->txq[i].txd_dma);
1830 cmd->flags = cpu_to_le32(MWL8K_SET_HW_SPEC_FLAG_HOST_DECR_MGMT); 1918 cmd->flags = cpu_to_le32(MWL8K_SET_HW_SPEC_FLAG_HOST_DECR_MGMT |
1919 MWL8K_SET_HW_SPEC_FLAG_HOSTFORM_PROBERESP |
1920 MWL8K_SET_HW_SPEC_FLAG_HOSTFORM_BEACON);
1831 cmd->num_tx_desc_per_queue = cpu_to_le32(MWL8K_TX_DESCS); 1921 cmd->num_tx_desc_per_queue = cpu_to_le32(MWL8K_TX_DESCS);
1832 cmd->total_rxd = cpu_to_le32(MWL8K_RX_DESCS); 1922 cmd->total_rxd = cpu_to_le32(MWL8K_RX_DESCS);
1833 1923
@@ -1897,9 +1987,9 @@ __mwl8k_cmd_mac_multicast_adr(struct ieee80211_hw *hw, int allmulti,
1897} 1987}
1898 1988
1899/* 1989/*
1900 * CMD_802_11_GET_STAT. 1990 * CMD_GET_STAT.
1901 */ 1991 */
1902struct mwl8k_cmd_802_11_get_stat { 1992struct mwl8k_cmd_get_stat {
1903 struct mwl8k_cmd_pkt header; 1993 struct mwl8k_cmd_pkt header;
1904 __le32 stats[64]; 1994 __le32 stats[64];
1905} __attribute__((packed)); 1995} __attribute__((packed));
@@ -1909,10 +1999,10 @@ struct mwl8k_cmd_802_11_get_stat {
1909#define MWL8K_STAT_FCS_ERROR 24 1999#define MWL8K_STAT_FCS_ERROR 24
1910#define MWL8K_STAT_RTS_SUCCESS 11 2000#define MWL8K_STAT_RTS_SUCCESS 11
1911 2001
1912static int mwl8k_cmd_802_11_get_stat(struct ieee80211_hw *hw, 2002static int mwl8k_cmd_get_stat(struct ieee80211_hw *hw,
1913 struct ieee80211_low_level_stats *stats) 2003 struct ieee80211_low_level_stats *stats)
1914{ 2004{
1915 struct mwl8k_cmd_802_11_get_stat *cmd; 2005 struct mwl8k_cmd_get_stat *cmd;
1916 int rc; 2006 int rc;
1917 2007
1918 cmd = kzalloc(sizeof(*cmd), GFP_KERNEL); 2008 cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
@@ -1939,9 +2029,9 @@ static int mwl8k_cmd_802_11_get_stat(struct ieee80211_hw *hw,
1939} 2029}
1940 2030
1941/* 2031/*
1942 * CMD_802_11_RADIO_CONTROL. 2032 * CMD_RADIO_CONTROL.
1943 */ 2033 */
1944struct mwl8k_cmd_802_11_radio_control { 2034struct mwl8k_cmd_radio_control {
1945 struct mwl8k_cmd_pkt header; 2035 struct mwl8k_cmd_pkt header;
1946 __le16 action; 2036 __le16 action;
1947 __le16 control; 2037 __le16 control;
@@ -1949,10 +2039,10 @@ struct mwl8k_cmd_802_11_radio_control {
1949} __attribute__((packed)); 2039} __attribute__((packed));
1950 2040
1951static int 2041static int
1952mwl8k_cmd_802_11_radio_control(struct ieee80211_hw *hw, bool enable, bool force) 2042mwl8k_cmd_radio_control(struct ieee80211_hw *hw, bool enable, bool force)
1953{ 2043{
1954 struct mwl8k_priv *priv = hw->priv; 2044 struct mwl8k_priv *priv = hw->priv;
1955 struct mwl8k_cmd_802_11_radio_control *cmd; 2045 struct mwl8k_cmd_radio_control *cmd;
1956 int rc; 2046 int rc;
1957 2047
1958 if (enable == priv->radio_on && !force) 2048 if (enable == priv->radio_on && !force)
@@ -1977,36 +2067,32 @@ mwl8k_cmd_802_11_radio_control(struct ieee80211_hw *hw, bool enable, bool force)
1977 return rc; 2067 return rc;
1978} 2068}
1979 2069
1980static int mwl8k_cmd_802_11_radio_disable(struct ieee80211_hw *hw) 2070static int mwl8k_cmd_radio_disable(struct ieee80211_hw *hw)
1981{ 2071{
1982 return mwl8k_cmd_802_11_radio_control(hw, 0, 0); 2072 return mwl8k_cmd_radio_control(hw, 0, 0);
1983} 2073}
1984 2074
1985static int mwl8k_cmd_802_11_radio_enable(struct ieee80211_hw *hw) 2075static int mwl8k_cmd_radio_enable(struct ieee80211_hw *hw)
1986{ 2076{
1987 return mwl8k_cmd_802_11_radio_control(hw, 1, 0); 2077 return mwl8k_cmd_radio_control(hw, 1, 0);
1988} 2078}
1989 2079
1990static int 2080static int
1991mwl8k_set_radio_preamble(struct ieee80211_hw *hw, bool short_preamble) 2081mwl8k_set_radio_preamble(struct ieee80211_hw *hw, bool short_preamble)
1992{ 2082{
1993 struct mwl8k_priv *priv; 2083 struct mwl8k_priv *priv = hw->priv;
1994
1995 if (hw == NULL || hw->priv == NULL)
1996 return -EINVAL;
1997 priv = hw->priv;
1998 2084
1999 priv->radio_short_preamble = short_preamble; 2085 priv->radio_short_preamble = short_preamble;
2000 2086
2001 return mwl8k_cmd_802_11_radio_control(hw, 1, 1); 2087 return mwl8k_cmd_radio_control(hw, 1, 1);
2002} 2088}
2003 2089
2004/* 2090/*
2005 * CMD_802_11_RF_TX_POWER. 2091 * CMD_RF_TX_POWER.
2006 */ 2092 */
2007#define MWL8K_TX_POWER_LEVEL_TOTAL 8 2093#define MWL8K_TX_POWER_LEVEL_TOTAL 8
2008 2094
2009struct mwl8k_cmd_802_11_rf_tx_power { 2095struct mwl8k_cmd_rf_tx_power {
2010 struct mwl8k_cmd_pkt header; 2096 struct mwl8k_cmd_pkt header;
2011 __le16 action; 2097 __le16 action;
2012 __le16 support_level; 2098 __le16 support_level;
@@ -2015,9 +2101,9 @@ struct mwl8k_cmd_802_11_rf_tx_power {
2015 __le16 power_level_list[MWL8K_TX_POWER_LEVEL_TOTAL]; 2101 __le16 power_level_list[MWL8K_TX_POWER_LEVEL_TOTAL];
2016} __attribute__((packed)); 2102} __attribute__((packed));
2017 2103
2018static int mwl8k_cmd_802_11_rf_tx_power(struct ieee80211_hw *hw, int dBm) 2104static int mwl8k_cmd_rf_tx_power(struct ieee80211_hw *hw, int dBm)
2019{ 2105{
2020 struct mwl8k_cmd_802_11_rf_tx_power *cmd; 2106 struct mwl8k_cmd_rf_tx_power *cmd;
2021 int rc; 2107 int rc;
2022 2108
2023 cmd = kzalloc(sizeof(*cmd), GFP_KERNEL); 2109 cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
@@ -2069,6 +2155,36 @@ mwl8k_cmd_rf_antenna(struct ieee80211_hw *hw, int antenna, int mask)
2069} 2155}
2070 2156
2071/* 2157/*
2158 * CMD_SET_BEACON.
2159 */
2160struct mwl8k_cmd_set_beacon {
2161 struct mwl8k_cmd_pkt header;
2162 __le16 beacon_len;
2163 __u8 beacon[0];
2164};
2165
2166static int mwl8k_cmd_set_beacon(struct ieee80211_hw *hw,
2167 struct ieee80211_vif *vif, u8 *beacon, int len)
2168{
2169 struct mwl8k_cmd_set_beacon *cmd;
2170 int rc;
2171
2172 cmd = kzalloc(sizeof(*cmd) + len, GFP_KERNEL);
2173 if (cmd == NULL)
2174 return -ENOMEM;
2175
2176 cmd->header.code = cpu_to_le16(MWL8K_CMD_SET_BEACON);
2177 cmd->header.length = cpu_to_le16(sizeof(*cmd) + len);
2178 cmd->beacon_len = cpu_to_le16(len);
2179 memcpy(cmd->beacon, beacon, len);
2180
2181 rc = mwl8k_post_pervif_cmd(hw, vif, &cmd->header);
2182 kfree(cmd);
2183
2184 return rc;
2185}
2186
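Note on the new CMD_SET_BEACON handler above: it follows the driver's pattern for variable-length firmware commands, allocating the fixed header and the beacon payload in one block and recording the payload length both in the command header and in beacon_len. A standalone sketch of the same allocation pattern using a flexible array member (struct and field names below are illustrative, not the driver's):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct cmd_hdr {
    uint16_t code;
    uint16_t length;
};

struct cmd_set_beacon {
    struct cmd_hdr header;
    uint16_t beacon_len;
    uint8_t beacon[];            /* payload follows the fixed part */
};

static struct cmd_set_beacon *build_set_beacon(const uint8_t *frame, size_t len)
{
    /* One allocation covers the fixed fields and the trailing payload. */
    struct cmd_set_beacon *cmd = calloc(1, sizeof(*cmd) + len);

    if (cmd == NULL)
        return NULL;

    cmd->header.length = (uint16_t)(sizeof(*cmd) + len);
    cmd->beacon_len = (uint16_t)len;
    memcpy(cmd->beacon, frame, len);

    return cmd;
}

int main(void)
{
    uint8_t beacon[8] = { 0x80, 0x00 };    /* dummy frame bytes */
    struct cmd_set_beacon *cmd = build_set_beacon(beacon, sizeof(beacon));

    if (cmd != NULL) {
        printf("total command length: %u\n", (unsigned)cmd->header.length);
        free(cmd);
    }
    return 0;
}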
2187/*
2072 * CMD_SET_PRE_SCAN. 2188 * CMD_SET_PRE_SCAN.
2073 */ 2189 */
2074struct mwl8k_cmd_set_pre_scan { 2190struct mwl8k_cmd_set_pre_scan {
@@ -2103,7 +2219,7 @@ struct mwl8k_cmd_set_post_scan {
2103} __attribute__((packed)); 2219} __attribute__((packed));
2104 2220
2105static int 2221static int
2106mwl8k_cmd_set_post_scan(struct ieee80211_hw *hw, __u8 *mac) 2222mwl8k_cmd_set_post_scan(struct ieee80211_hw *hw, const __u8 *mac)
2107{ 2223{
2108 struct mwl8k_cmd_set_post_scan *cmd; 2224 struct mwl8k_cmd_set_post_scan *cmd;
2109 int rc; 2225 int rc;
@@ -2134,8 +2250,9 @@ struct mwl8k_cmd_set_rf_channel {
2134} __attribute__((packed)); 2250} __attribute__((packed));
2135 2251
2136static int mwl8k_cmd_set_rf_channel(struct ieee80211_hw *hw, 2252static int mwl8k_cmd_set_rf_channel(struct ieee80211_hw *hw,
2137 struct ieee80211_channel *channel) 2253 struct ieee80211_conf *conf)
2138{ 2254{
2255 struct ieee80211_channel *channel = conf->channel;
2139 struct mwl8k_cmd_set_rf_channel *cmd; 2256 struct mwl8k_cmd_set_rf_channel *cmd;
2140 int rc; 2257 int rc;
2141 2258
@@ -2147,10 +2264,19 @@ static int mwl8k_cmd_set_rf_channel(struct ieee80211_hw *hw,
2147 cmd->header.length = cpu_to_le16(sizeof(*cmd)); 2264 cmd->header.length = cpu_to_le16(sizeof(*cmd));
2148 cmd->action = cpu_to_le16(MWL8K_CMD_SET); 2265 cmd->action = cpu_to_le16(MWL8K_CMD_SET);
2149 cmd->current_channel = channel->hw_value; 2266 cmd->current_channel = channel->hw_value;
2267
2150 if (channel->band == IEEE80211_BAND_2GHZ) 2268 if (channel->band == IEEE80211_BAND_2GHZ)
2151 cmd->channel_flags = cpu_to_le32(0x00000081); 2269 cmd->channel_flags |= cpu_to_le32(0x00000001);
2152 else 2270 else if (channel->band == IEEE80211_BAND_5GHZ)
2153 cmd->channel_flags = cpu_to_le32(0x00000000); 2271 cmd->channel_flags |= cpu_to_le32(0x00000004);
2272
2273 if (conf->channel_type == NL80211_CHAN_NO_HT ||
2274 conf->channel_type == NL80211_CHAN_HT20)
2275 cmd->channel_flags |= cpu_to_le32(0x00000080);
2276 else if (conf->channel_type == NL80211_CHAN_HT40MINUS)
2277 cmd->channel_flags |= cpu_to_le32(0x000001900);
2278 else if (conf->channel_type == NL80211_CHAN_HT40PLUS)
2279 cmd->channel_flags |= cpu_to_le32(0x000000900);
2154 2280
2155 rc = mwl8k_post_cmd(hw, &cmd->header); 2281 rc = mwl8k_post_cmd(hw, &cmd->header);
2156 kfree(cmd); 2282 kfree(cmd);
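Note on the SET_RF_CHANNEL hunk above: channel_flags is now built up from a band bit (0x1 for 2.4 GHz, 0x4 for 5 GHz) plus a width pattern chosen from the HT channel type (the source's 0x000001900 and 0x000000900 are numerically 0x1900 and 0x900). A standalone sketch of the same selection, with local enums standing in for the mac80211 types; the constants are copied from the diff:

#include <stdint.h>
#include <stdio.h>

enum band { BAND_2GHZ, BAND_5GHZ };
enum chan_type { CHAN_NO_HT, CHAN_HT20, CHAN_HT40MINUS, CHAN_HT40PLUS };

static uint32_t rf_channel_flags(enum band band, enum chan_type type)
{
    uint32_t flags = 0;

    if (band == BAND_2GHZ)
        flags |= 0x00000001;
    else if (band == BAND_5GHZ)
        flags |= 0x00000004;

    if (type == CHAN_NO_HT || type == CHAN_HT20)
        flags |= 0x00000080;
    else if (type == CHAN_HT40MINUS)
        flags |= 0x00001900;
    else if (type == CHAN_HT40PLUS)
        flags |= 0x00000900;

    return flags;
}

int main(void)
{
    printf("2.4 GHz, HT20:  0x%08x\n", (unsigned)rf_channel_flags(BAND_2GHZ, CHAN_HT20));
    printf("5 GHz,   HT40+: 0x%08x\n", (unsigned)rf_channel_flags(BAND_5GHZ, CHAN_HT40PLUS));
    return 0;
}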
@@ -2159,85 +2285,75 @@ static int mwl8k_cmd_set_rf_channel(struct ieee80211_hw *hw,
2159} 2285}
2160 2286
2161/* 2287/*
2162 * CMD_SET_SLOT. 2288 * CMD_SET_AID.
2163 */ 2289 */
2164struct mwl8k_cmd_set_slot { 2290#define MWL8K_FRAME_PROT_DISABLED 0x00
2165 struct mwl8k_cmd_pkt header; 2291#define MWL8K_FRAME_PROT_11G 0x07
2166 __le16 action; 2292#define MWL8K_FRAME_PROT_11N_HT_40MHZ_ONLY 0x02
2167 __u8 short_slot; 2293#define MWL8K_FRAME_PROT_11N_HT_ALL 0x06
2168} __attribute__((packed));
2169
2170static int mwl8k_cmd_set_slot(struct ieee80211_hw *hw, bool short_slot_time)
2171{
2172 struct mwl8k_cmd_set_slot *cmd;
2173 int rc;
2174
2175 cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
2176 if (cmd == NULL)
2177 return -ENOMEM;
2178
2179 cmd->header.code = cpu_to_le16(MWL8K_CMD_SET_SLOT);
2180 cmd->header.length = cpu_to_le16(sizeof(*cmd));
2181 cmd->action = cpu_to_le16(MWL8K_CMD_SET);
2182 cmd->short_slot = short_slot_time;
2183
2184 rc = mwl8k_post_cmd(hw, &cmd->header);
2185 kfree(cmd);
2186 2294
2187 return rc; 2295struct mwl8k_cmd_update_set_aid {
2188} 2296 struct mwl8k_cmd_pkt header;
2297 __le16 aid;
2189 2298
2190/* 2299 /* AP's MAC address (BSSID) */
2191 * CMD_MIMO_CONFIG. 2300 __u8 bssid[ETH_ALEN];
2192 */ 2301 __le16 protection_mode;
2193struct mwl8k_cmd_mimo_config { 2302 __u8 supp_rates[14];
2194 struct mwl8k_cmd_pkt header;
2195 __le32 action;
2196 __u8 rx_antenna_map;
2197 __u8 tx_antenna_map;
2198} __attribute__((packed)); 2303} __attribute__((packed));
2199 2304
2200static int mwl8k_cmd_mimo_config(struct ieee80211_hw *hw, __u8 rx, __u8 tx) 2305static void legacy_rate_mask_to_array(u8 *rates, u32 mask)
2201{ 2306{
2202 struct mwl8k_cmd_mimo_config *cmd; 2307 int i;
2203 int rc; 2308 int j;
2204
2205 cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
2206 if (cmd == NULL)
2207 return -ENOMEM;
2208
2209 cmd->header.code = cpu_to_le16(MWL8K_CMD_MIMO_CONFIG);
2210 cmd->header.length = cpu_to_le16(sizeof(*cmd));
2211 cmd->action = cpu_to_le32((u32)MWL8K_CMD_SET);
2212 cmd->rx_antenna_map = rx;
2213 cmd->tx_antenna_map = tx;
2214 2309
2215 rc = mwl8k_post_cmd(hw, &cmd->header); 2310 /*
2216 kfree(cmd); 2311 * Clear nonstandard rates 4 and 13.
2312 */
2313 mask &= 0x1fef;
2217 2314
2218 return rc; 2315 for (i = 0, j = 0; i < 14; i++) {
2316 if (mask & (1 << i))
2317 rates[j++] = mwl8k_rates_24[i].hw_value;
2318 }
2219} 2319}
2220 2320
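Note on legacy_rate_mask_to_array() above: it converts a 14-bit legacy-rate bitmap into a packed array of hardware rate codes, first masking with 0x1fef, which has exactly bits 4 and 13 cleared, to drop the driver's two nonstandard 2.4 GHz entries. A standalone sketch with a stand-in rate table (the hw_value numbers below are placeholders, not necessarily the driver's):

#include <stdint.h>
#include <stdio.h>

/* Placeholder hw_value codes, index-compatible with a 14-entry rate table. */
static const uint8_t hw_value[14] = {
    2, 4, 11, 22, 44, 12, 18, 24, 36, 48, 72, 96, 108, 144
};

static int rate_mask_to_array(uint8_t *rates, uint32_t mask)
{
    int i, j;

    /* Clear nonstandard rates 4 and 13: 0x1fef has those two bits zero. */
    mask &= 0x1fef;

    for (i = 0, j = 0; i < 14; i++) {
        if (mask & (1 << i))
            rates[j++] = hw_value[i];
    }
    return j;    /* number of rates written */
}

int main(void)
{
    uint8_t rates[14];
    int n = rate_mask_to_array(rates, 0x3fff);    /* all 14 bits set */
    int i;

    printf("%d rates survive the 0x1fef filter:", n);    /* expect 12 */
    for (i = 0; i < n; i++)
        printf(" %d", rates[i]);
    printf("\n");
    return 0;
}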
2221/* 2321static int
2222 * CMD_ENABLE_SNIFFER. 2322mwl8k_cmd_set_aid(struct ieee80211_hw *hw,
2223 */ 2323 struct ieee80211_vif *vif, u32 legacy_rate_mask)
2224struct mwl8k_cmd_enable_sniffer {
2225 struct mwl8k_cmd_pkt header;
2226 __le32 action;
2227} __attribute__((packed));
2228
2229static int mwl8k_enable_sniffer(struct ieee80211_hw *hw, bool enable)
2230{ 2324{
2231 struct mwl8k_cmd_enable_sniffer *cmd; 2325 struct mwl8k_cmd_update_set_aid *cmd;
2326 u16 prot_mode;
2232 int rc; 2327 int rc;
2233 2328
2234 cmd = kzalloc(sizeof(*cmd), GFP_KERNEL); 2329 cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
2235 if (cmd == NULL) 2330 if (cmd == NULL)
2236 return -ENOMEM; 2331 return -ENOMEM;
2237 2332
2238 cmd->header.code = cpu_to_le16(MWL8K_CMD_ENABLE_SNIFFER); 2333 cmd->header.code = cpu_to_le16(MWL8K_CMD_SET_AID);
2239 cmd->header.length = cpu_to_le16(sizeof(*cmd)); 2334 cmd->header.length = cpu_to_le16(sizeof(*cmd));
2240 cmd->action = cpu_to_le32(!!enable); 2335 cmd->aid = cpu_to_le16(vif->bss_conf.aid);
2336 memcpy(cmd->bssid, vif->bss_conf.bssid, ETH_ALEN);
2337
2338 if (vif->bss_conf.use_cts_prot) {
2339 prot_mode = MWL8K_FRAME_PROT_11G;
2340 } else {
2341 switch (vif->bss_conf.ht_operation_mode &
2342 IEEE80211_HT_OP_MODE_PROTECTION) {
2343 case IEEE80211_HT_OP_MODE_PROTECTION_20MHZ:
2344 prot_mode = MWL8K_FRAME_PROT_11N_HT_40MHZ_ONLY;
2345 break;
2346 case IEEE80211_HT_OP_MODE_PROTECTION_NONHT_MIXED:
2347 prot_mode = MWL8K_FRAME_PROT_11N_HT_ALL;
2348 break;
2349 default:
2350 prot_mode = MWL8K_FRAME_PROT_DISABLED;
2351 break;
2352 }
2353 }
2354 cmd->protection_mode = cpu_to_le16(prot_mode);
2355
2356 legacy_rate_mask_to_array(cmd->supp_rates, legacy_rate_mask);
2241 2357
2242 rc = mwl8k_post_cmd(hw, &cmd->header); 2358 rc = mwl8k_post_cmd(hw, &cmd->header);
2243 kfree(cmd); 2359 kfree(cmd);
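Note on the relocated SET_AID handler above: the firmware protection mode is now derived from mac80211's BSS state; CTS-to-self protection selects the 11g mode, otherwise the protection field of the HT operation mode picks one of the 11n modes. A standalone sketch of that mapping (the firmware codes are copied from the diff; the HT_OP_MODE_* values mirror ieee80211.h as I recall them and should be treated as assumptions):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Firmware protection codes, copied from the diff. */
#define FRAME_PROT_DISABLED              0x00
#define FRAME_PROT_11G                   0x07
#define FRAME_PROT_11N_HT_40MHZ_ONLY     0x02
#define FRAME_PROT_11N_HT_ALL            0x06

/* HT operation-mode protection field (assumed values, as in ieee80211.h). */
#define HT_OP_MODE_PROTECTION            0x0003
#define HT_OP_MODE_PROTECTION_20MHZ      2
#define HT_OP_MODE_PROTECTION_NONHT_MIXED 3

static uint16_t pick_protection(bool use_cts_prot, uint16_t ht_operation_mode)
{
    if (use_cts_prot)
        return FRAME_PROT_11G;

    switch (ht_operation_mode & HT_OP_MODE_PROTECTION) {
    case HT_OP_MODE_PROTECTION_20MHZ:
        return FRAME_PROT_11N_HT_40MHZ_ONLY;
    case HT_OP_MODE_PROTECTION_NONHT_MIXED:
        return FRAME_PROT_11N_HT_ALL;
    default:
        return FRAME_PROT_DISABLED;
    }
}

int main(void)
{
    printf("cts prot on      -> 0x%02x\n", pick_protection(true, 0));
    printf("non-HT mixed BSS -> 0x%02x\n", pick_protection(false, 3));
    printf("no protection    -> 0x%02x\n", pick_protection(false, 0));
    return 0;
}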
@@ -2246,37 +2362,32 @@ static int mwl8k_enable_sniffer(struct ieee80211_hw *hw, bool enable)
2246} 2362}
2247 2363
2248/* 2364/*
2249 * CMD_SET_MAC_ADDR. 2365 * CMD_SET_RATE.
2250 */ 2366 */
2251struct mwl8k_cmd_set_mac_addr { 2367struct mwl8k_cmd_set_rate {
2252 struct mwl8k_cmd_pkt header; 2368 struct mwl8k_cmd_pkt header;
2253 union { 2369 __u8 legacy_rates[14];
2254 struct { 2370
2255 __le16 mac_type; 2371 /* Bitmap for supported MCS codes. */
2256 __u8 mac_addr[ETH_ALEN]; 2372 __u8 mcs_set[16];
2257 } mbss; 2373 __u8 reserved[16];
2258 __u8 mac_addr[ETH_ALEN];
2259 };
2260} __attribute__((packed)); 2374} __attribute__((packed));
2261 2375
2262static int mwl8k_set_mac_addr(struct ieee80211_hw *hw, u8 *mac) 2376static int
2377mwl8k_cmd_set_rate(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
2378 u32 legacy_rate_mask, u8 *mcs_rates)
2263{ 2379{
2264 struct mwl8k_priv *priv = hw->priv; 2380 struct mwl8k_cmd_set_rate *cmd;
2265 struct mwl8k_cmd_set_mac_addr *cmd;
2266 int rc; 2381 int rc;
2267 2382
2268 cmd = kzalloc(sizeof(*cmd), GFP_KERNEL); 2383 cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
2269 if (cmd == NULL) 2384 if (cmd == NULL)
2270 return -ENOMEM; 2385 return -ENOMEM;
2271 2386
2272 cmd->header.code = cpu_to_le16(MWL8K_CMD_SET_MAC_ADDR); 2387 cmd->header.code = cpu_to_le16(MWL8K_CMD_SET_RATE);
2273 cmd->header.length = cpu_to_le16(sizeof(*cmd)); 2388 cmd->header.length = cpu_to_le16(sizeof(*cmd));
2274 if (priv->ap_fw) { 2389 legacy_rate_mask_to_array(cmd->legacy_rates, legacy_rate_mask);
2275 cmd->mbss.mac_type = 0; 2390 memcpy(cmd->mcs_set, mcs_rates, 16);
2276 memcpy(cmd->mbss.mac_addr, mac, ETH_ALEN);
2277 } else {
2278 memcpy(cmd->mac_addr, mac, ETH_ALEN);
2279 }
2280 2391
2281 rc = mwl8k_post_cmd(hw, &cmd->header); 2392 rc = mwl8k_post_cmd(hw, &cmd->header);
2282 kfree(cmd); 2393 kfree(cmd);
@@ -2284,29 +2395,40 @@ static int mwl8k_set_mac_addr(struct ieee80211_hw *hw, u8 *mac)
2284 return rc; 2395 return rc;
2285} 2396}
2286 2397
2287
2288/* 2398/*
2289 * CMD_SET_RATEADAPT_MODE. 2399 * CMD_FINALIZE_JOIN.
2290 */ 2400 */
2291struct mwl8k_cmd_set_rate_adapt_mode { 2401#define MWL8K_FJ_BEACON_MAXLEN 128
2402
2403struct mwl8k_cmd_finalize_join {
2292 struct mwl8k_cmd_pkt header; 2404 struct mwl8k_cmd_pkt header;
2293 __le16 action; 2405 __le32 sleep_interval; /* Number of beacon periods to sleep */
2294 __le16 mode; 2406 __u8 beacon_data[MWL8K_FJ_BEACON_MAXLEN];
2295} __attribute__((packed)); 2407} __attribute__((packed));
2296 2408
2297static int mwl8k_cmd_setrateadaptmode(struct ieee80211_hw *hw, __u16 mode) 2409static int mwl8k_cmd_finalize_join(struct ieee80211_hw *hw, void *frame,
2410 int framelen, int dtim)
2298{ 2411{
2299 struct mwl8k_cmd_set_rate_adapt_mode *cmd; 2412 struct mwl8k_cmd_finalize_join *cmd;
2413 struct ieee80211_mgmt *payload = frame;
2414 int payload_len;
2300 int rc; 2415 int rc;
2301 2416
2302 cmd = kzalloc(sizeof(*cmd), GFP_KERNEL); 2417 cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
2303 if (cmd == NULL) 2418 if (cmd == NULL)
2304 return -ENOMEM; 2419 return -ENOMEM;
2305 2420
2306 cmd->header.code = cpu_to_le16(MWL8K_CMD_SET_RATEADAPT_MODE); 2421 cmd->header.code = cpu_to_le16(MWL8K_CMD_SET_FINALIZE_JOIN);
2307 cmd->header.length = cpu_to_le16(sizeof(*cmd)); 2422 cmd->header.length = cpu_to_le16(sizeof(*cmd));
2308 cmd->action = cpu_to_le16(MWL8K_CMD_SET); 2423 cmd->sleep_interval = cpu_to_le32(dtim ? dtim : 1);
2309 cmd->mode = cpu_to_le16(mode); 2424
2425 payload_len = framelen - ieee80211_hdrlen(payload->frame_control);
2426 if (payload_len < 0)
2427 payload_len = 0;
2428 else if (payload_len > MWL8K_FJ_BEACON_MAXLEN)
2429 payload_len = MWL8K_FJ_BEACON_MAXLEN;
2430
2431 memcpy(cmd->beacon_data, &payload->u.beacon, payload_len);
2310 2432
2311 rc = mwl8k_post_cmd(hw, &cmd->header); 2433 rc = mwl8k_post_cmd(hw, &cmd->header);
2312 kfree(cmd); 2434 kfree(cmd);
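Note on mwl8k_cmd_finalize_join() above (relocated and renamed by this patch): it copies at most MWL8K_FJ_BEACON_MAXLEN bytes of the beacon body and clamps a possibly negative length, which can occur when the frame is shorter than its 802.11 header, to zero. A one-function sketch of the clamp:

#include <stdio.h>

#define FJ_BEACON_MAXLEN 128

/* Clamp (framelen - hdrlen) into [0, FJ_BEACON_MAXLEN]. */
static int beacon_payload_len(int framelen, int hdrlen)
{
    int len = framelen - hdrlen;

    if (len < 0)
        len = 0;
    else if (len > FJ_BEACON_MAXLEN)
        len = FJ_BEACON_MAXLEN;

    return len;
}

int main(void)
{
    printf("%d %d %d\n",
           beacon_payload_len(20, 24),     /* short frame  -> 0   */
           beacon_payload_len(100, 24),    /* normal frame -> 76  */
           beacon_payload_len(400, 24));   /* long frame   -> 128 */
    return 0;
}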
@@ -2315,59 +2437,57 @@ static int mwl8k_cmd_setrateadaptmode(struct ieee80211_hw *hw, __u16 mode)
2315} 2437}
2316 2438
2317/* 2439/*
2318 * CMD_SET_WMM_MODE. 2440 * CMD_SET_RTS_THRESHOLD.
2319 */ 2441 */
2320struct mwl8k_cmd_set_wmm { 2442struct mwl8k_cmd_set_rts_threshold {
2321 struct mwl8k_cmd_pkt header; 2443 struct mwl8k_cmd_pkt header;
2322 __le16 action; 2444 __le16 action;
2445 __le16 threshold;
2323} __attribute__((packed)); 2446} __attribute__((packed));
2324 2447
2325static int mwl8k_set_wmm(struct ieee80211_hw *hw, bool enable) 2448static int
2449mwl8k_cmd_set_rts_threshold(struct ieee80211_hw *hw, int rts_thresh)
2326{ 2450{
2327 struct mwl8k_priv *priv = hw->priv; 2451 struct mwl8k_cmd_set_rts_threshold *cmd;
2328 struct mwl8k_cmd_set_wmm *cmd;
2329 int rc; 2452 int rc;
2330 2453
2331 cmd = kzalloc(sizeof(*cmd), GFP_KERNEL); 2454 cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
2332 if (cmd == NULL) 2455 if (cmd == NULL)
2333 return -ENOMEM; 2456 return -ENOMEM;
2334 2457
2335 cmd->header.code = cpu_to_le16(MWL8K_CMD_SET_WMM_MODE); 2458 cmd->header.code = cpu_to_le16(MWL8K_CMD_RTS_THRESHOLD);
2336 cmd->header.length = cpu_to_le16(sizeof(*cmd)); 2459 cmd->header.length = cpu_to_le16(sizeof(*cmd));
2337 cmd->action = cpu_to_le16(!!enable); 2460 cmd->action = cpu_to_le16(MWL8K_CMD_SET);
2461 cmd->threshold = cpu_to_le16(rts_thresh);
2338 2462
2339 rc = mwl8k_post_cmd(hw, &cmd->header); 2463 rc = mwl8k_post_cmd(hw, &cmd->header);
2340 kfree(cmd); 2464 kfree(cmd);
2341 2465
2342 if (!rc)
2343 priv->wmm_enabled = enable;
2344
2345 return rc; 2466 return rc;
2346} 2467}
2347 2468
2348/* 2469/*
2349 * CMD_SET_RTS_THRESHOLD. 2470 * CMD_SET_SLOT.
2350 */ 2471 */
2351struct mwl8k_cmd_rts_threshold { 2472struct mwl8k_cmd_set_slot {
2352 struct mwl8k_cmd_pkt header; 2473 struct mwl8k_cmd_pkt header;
2353 __le16 action; 2474 __le16 action;
2354 __le16 threshold; 2475 __u8 short_slot;
2355} __attribute__((packed)); 2476} __attribute__((packed));
2356 2477
2357static int mwl8k_rts_threshold(struct ieee80211_hw *hw, 2478static int mwl8k_cmd_set_slot(struct ieee80211_hw *hw, bool short_slot_time)
2358 u16 action, u16 threshold)
2359{ 2479{
2360 struct mwl8k_cmd_rts_threshold *cmd; 2480 struct mwl8k_cmd_set_slot *cmd;
2361 int rc; 2481 int rc;
2362 2482
2363 cmd = kzalloc(sizeof(*cmd), GFP_KERNEL); 2483 cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
2364 if (cmd == NULL) 2484 if (cmd == NULL)
2365 return -ENOMEM; 2485 return -ENOMEM;
2366 2486
2367 cmd->header.code = cpu_to_le16(MWL8K_CMD_RTS_THRESHOLD); 2487 cmd->header.code = cpu_to_le16(MWL8K_CMD_SET_SLOT);
2368 cmd->header.length = cpu_to_le16(sizeof(*cmd)); 2488 cmd->header.length = cpu_to_le16(sizeof(*cmd));
2369 cmd->action = cpu_to_le16(action); 2489 cmd->action = cpu_to_le16(MWL8K_CMD_SET);
2370 cmd->threshold = cpu_to_le16(threshold); 2490 cmd->short_slot = short_slot_time;
2371 2491
2372 rc = mwl8k_post_cmd(hw, &cmd->header); 2492 rc = mwl8k_post_cmd(hw, &cmd->header);
2373 kfree(cmd); 2493 kfree(cmd);
@@ -2426,9 +2546,9 @@ struct mwl8k_cmd_set_edca_params {
2426 MWL8K_SET_EDCA_AIFS) 2546 MWL8K_SET_EDCA_AIFS)
2427 2547
2428static int 2548static int
2429mwl8k_set_edca_params(struct ieee80211_hw *hw, __u8 qnum, 2549mwl8k_cmd_set_edca_params(struct ieee80211_hw *hw, __u8 qnum,
2430 __u16 cw_min, __u16 cw_max, 2550 __u16 cw_min, __u16 cw_max,
2431 __u8 aifs, __u16 txop) 2551 __u8 aifs, __u16 txop)
2432{ 2552{
2433 struct mwl8k_priv *priv = hw->priv; 2553 struct mwl8k_priv *priv = hw->priv;
2434 struct mwl8k_cmd_set_edca_params *cmd; 2554 struct mwl8k_cmd_set_edca_params *cmd;
@@ -2438,12 +2558,6 @@ mwl8k_set_edca_params(struct ieee80211_hw *hw, __u8 qnum,
2438 if (cmd == NULL) 2558 if (cmd == NULL)
2439 return -ENOMEM; 2559 return -ENOMEM;
2440 2560
2441 /*
2442 * Queues 0 (BE) and 1 (BK) are swapped in hardware for
2443 * this call.
2444 */
2445 qnum ^= !(qnum >> 1);
2446
2447 cmd->header.code = cpu_to_le16(MWL8K_CMD_SET_EDCA_PARAMS); 2561 cmd->header.code = cpu_to_le16(MWL8K_CMD_SET_EDCA_PARAMS);
2448 cmd->header.length = cpu_to_le16(sizeof(*cmd)); 2562 cmd->header.length = cpu_to_le16(sizeof(*cmd));
2449 cmd->action = cpu_to_le16(MWL8K_SET_EDCA_ALL); 2563 cmd->action = cpu_to_le16(MWL8K_SET_EDCA_ALL);
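Note on the EDCA hunk above: the patch removes the "qnum ^= !(qnum >> 1)" fixup that used to swap queues 0 (BE) and 1 (BK) before programming the parameters. The expression flips the low bit only when the queue number is 0 or 1 and leaves queues 2 and 3 unchanged; a standalone sketch of the old mapping, for reference:

#include <stdio.h>

/* Old BE/BK swap: 0 <-> 1, while 2 and 3 map to themselves. */
static int swap_be_bk(int qnum)
{
    return qnum ^ !(qnum >> 1);
}

int main(void)
{
    int q;

    for (q = 0; q < 4; q++)
        printf("queue %d -> %d\n", q, swap_be_bk(q));
    return 0;
}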
@@ -2467,170 +2581,259 @@ mwl8k_set_edca_params(struct ieee80211_hw *hw, __u8 qnum,
2467} 2581}
2468 2582
2469/* 2583/*
2470 * CMD_FINALIZE_JOIN. 2584 * CMD_SET_WMM_MODE.
2471 */ 2585 */
2472#define MWL8K_FJ_BEACON_MAXLEN 128 2586struct mwl8k_cmd_set_wmm_mode {
2473
2474struct mwl8k_cmd_finalize_join {
2475 struct mwl8k_cmd_pkt header; 2587 struct mwl8k_cmd_pkt header;
2476 __le32 sleep_interval; /* Number of beacon periods to sleep */ 2588 __le16 action;
2477 __u8 beacon_data[MWL8K_FJ_BEACON_MAXLEN];
2478} __attribute__((packed)); 2589} __attribute__((packed));
2479 2590
2480static int mwl8k_finalize_join(struct ieee80211_hw *hw, void *frame, 2591static int mwl8k_cmd_set_wmm_mode(struct ieee80211_hw *hw, bool enable)
2481 int framelen, int dtim)
2482{ 2592{
2483 struct mwl8k_cmd_finalize_join *cmd; 2593 struct mwl8k_priv *priv = hw->priv;
2484 struct ieee80211_mgmt *payload = frame; 2594 struct mwl8k_cmd_set_wmm_mode *cmd;
2485 int payload_len;
2486 int rc; 2595 int rc;
2487 2596
2488 cmd = kzalloc(sizeof(*cmd), GFP_KERNEL); 2597 cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
2489 if (cmd == NULL) 2598 if (cmd == NULL)
2490 return -ENOMEM; 2599 return -ENOMEM;
2491 2600
2492 cmd->header.code = cpu_to_le16(MWL8K_CMD_SET_FINALIZE_JOIN); 2601 cmd->header.code = cpu_to_le16(MWL8K_CMD_SET_WMM_MODE);
2493 cmd->header.length = cpu_to_le16(sizeof(*cmd)); 2602 cmd->header.length = cpu_to_le16(sizeof(*cmd));
2494 cmd->sleep_interval = cpu_to_le32(dtim ? dtim : 1); 2603 cmd->action = cpu_to_le16(!!enable);
2495
2496 payload_len = framelen - ieee80211_hdrlen(payload->frame_control);
2497 if (payload_len < 0)
2498 payload_len = 0;
2499 else if (payload_len > MWL8K_FJ_BEACON_MAXLEN)
2500 payload_len = MWL8K_FJ_BEACON_MAXLEN;
2501
2502 memcpy(cmd->beacon_data, &payload->u.beacon, payload_len);
2503 2604
2504 rc = mwl8k_post_cmd(hw, &cmd->header); 2605 rc = mwl8k_post_cmd(hw, &cmd->header);
2505 kfree(cmd); 2606 kfree(cmd);
2506 2607
2608 if (!rc)
2609 priv->wmm_enabled = enable;
2610
2507 return rc; 2611 return rc;
2508} 2612}
2509 2613
2510/* 2614/*
2511 * CMD_UPDATE_STADB. 2615 * CMD_MIMO_CONFIG.
2512 */ 2616 */
2513struct mwl8k_cmd_update_sta_db { 2617struct mwl8k_cmd_mimo_config {
2514 struct mwl8k_cmd_pkt header; 2618 struct mwl8k_cmd_pkt header;
2619 __le32 action;
2620 __u8 rx_antenna_map;
2621 __u8 tx_antenna_map;
2622} __attribute__((packed));
2515 2623
2516 /* See STADB_ACTION_TYPE */ 2624static int mwl8k_cmd_mimo_config(struct ieee80211_hw *hw, __u8 rx, __u8 tx)
2517 __le32 action; 2625{
2626 struct mwl8k_cmd_mimo_config *cmd;
2627 int rc;
2518 2628
2519 /* Peer MAC address */ 2629 cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
2520 __u8 peer_addr[ETH_ALEN]; 2630 if (cmd == NULL)
2631 return -ENOMEM;
2521 2632
2522 __le32 reserved; 2633 cmd->header.code = cpu_to_le16(MWL8K_CMD_MIMO_CONFIG);
2634 cmd->header.length = cpu_to_le16(sizeof(*cmd));
2635 cmd->action = cpu_to_le32((u32)MWL8K_CMD_SET);
2636 cmd->rx_antenna_map = rx;
2637 cmd->tx_antenna_map = tx;
2523 2638
2524 /* Peer info - valid during add/update. */ 2639 rc = mwl8k_post_cmd(hw, &cmd->header);
2525 struct peer_capability_info peer_info; 2640 kfree(cmd);
2641
2642 return rc;
2643}
2644
2645/*
2646 * CMD_USE_FIXED_RATE (STA version).
2647 */
2648struct mwl8k_cmd_use_fixed_rate_sta {
2649 struct mwl8k_cmd_pkt header;
2650 __le32 action;
2651 __le32 allow_rate_drop;
2652 __le32 num_rates;
2653 struct {
2654 __le32 is_ht_rate;
2655 __le32 enable_retry;
2656 __le32 rate;
2657 __le32 retry_count;
2658 } rate_entry[8];
2659 __le32 rate_type;
2660 __le32 reserved1;
2661 __le32 reserved2;
2526} __attribute__((packed)); 2662} __attribute__((packed));
2527 2663
2528static int mwl8k_cmd_update_sta_db(struct ieee80211_hw *hw, 2664#define MWL8K_USE_AUTO_RATE 0x0002
2529 struct ieee80211_vif *vif, __u32 action) 2665#define MWL8K_UCAST_RATE 0
2666
2667static int mwl8k_cmd_use_fixed_rate_sta(struct ieee80211_hw *hw)
2530{ 2668{
2531 struct mwl8k_vif *mv_vif = MWL8K_VIF(vif); 2669 struct mwl8k_cmd_use_fixed_rate_sta *cmd;
2532 struct ieee80211_bss_conf *info = &mv_vif->bss_info;
2533 struct mwl8k_cmd_update_sta_db *cmd;
2534 struct peer_capability_info *peer_info;
2535 int rc; 2670 int rc;
2536 2671
2537 cmd = kzalloc(sizeof(*cmd), GFP_KERNEL); 2672 cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
2538 if (cmd == NULL) 2673 if (cmd == NULL)
2539 return -ENOMEM; 2674 return -ENOMEM;
2540 2675
2541 cmd->header.code = cpu_to_le16(MWL8K_CMD_UPDATE_STADB); 2676 cmd->header.code = cpu_to_le16(MWL8K_CMD_USE_FIXED_RATE);
2542 cmd->header.length = cpu_to_le16(sizeof(*cmd)); 2677 cmd->header.length = cpu_to_le16(sizeof(*cmd));
2678 cmd->action = cpu_to_le32(MWL8K_USE_AUTO_RATE);
2679 cmd->rate_type = cpu_to_le32(MWL8K_UCAST_RATE);
2543 2680
2544 cmd->action = cpu_to_le32(action); 2681 rc = mwl8k_post_cmd(hw, &cmd->header);
2545 peer_info = &cmd->peer_info; 2682 kfree(cmd);
2546 memcpy(cmd->peer_addr, mv_vif->bssid, ETH_ALEN);
2547 2683
2548 switch (action) { 2684 return rc;
2549 case MWL8K_STA_DB_ADD_ENTRY: 2685}
2550 case MWL8K_STA_DB_MODIFY_ENTRY:
2551 /* Build peer_info block */
2552 peer_info->peer_type = MWL8K_PEER_TYPE_ACCESSPOINT;
2553 peer_info->basic_caps = cpu_to_le16(info->assoc_capability);
2554 memcpy(peer_info->legacy_rates, mwl8k_rateids,
2555 sizeof(mwl8k_rateids));
2556 peer_info->interop = 1;
2557 peer_info->amsdu_enabled = 0;
2558
2559 rc = mwl8k_post_cmd(hw, &cmd->header);
2560 if (rc == 0)
2561 mv_vif->peer_id = peer_info->station_id;
2562 2686
2563 break; 2687/*
2688 * CMD_USE_FIXED_RATE (AP version).
2689 */
2690struct mwl8k_cmd_use_fixed_rate_ap {
2691 struct mwl8k_cmd_pkt header;
2692 __le32 action;
2693 __le32 allow_rate_drop;
2694 __le32 num_rates;
2695 struct mwl8k_rate_entry_ap {
2696 __le32 is_ht_rate;
2697 __le32 enable_retry;
2698 __le32 rate;
2699 __le32 retry_count;
2700 } rate_entry[4];
2701 u8 multicast_rate;
2702 u8 multicast_rate_type;
2703 u8 management_rate;
2704} __attribute__((packed));
2564 2705
2565 case MWL8K_STA_DB_DEL_ENTRY: 2706static int
2566 case MWL8K_STA_DB_FLUSH: 2707mwl8k_cmd_use_fixed_rate_ap(struct ieee80211_hw *hw, int mcast, int mgmt)
2567 default: 2708{
2568 rc = mwl8k_post_cmd(hw, &cmd->header); 2709 struct mwl8k_cmd_use_fixed_rate_ap *cmd;
2569 if (rc == 0) 2710 int rc;
2570 mv_vif->peer_id = 0; 2711
2571 break; 2712 cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
2572 } 2713 if (cmd == NULL)
2714 return -ENOMEM;
2715
2716 cmd->header.code = cpu_to_le16(MWL8K_CMD_USE_FIXED_RATE);
2717 cmd->header.length = cpu_to_le16(sizeof(*cmd));
2718 cmd->action = cpu_to_le32(MWL8K_USE_AUTO_RATE);
2719 cmd->multicast_rate = mcast;
2720 cmd->management_rate = mgmt;
2721
2722 rc = mwl8k_post_cmd(hw, &cmd->header);
2573 kfree(cmd); 2723 kfree(cmd);
2574 2724
2575 return rc; 2725 return rc;
2576} 2726}
2577 2727
2578/* 2728/*
2579 * CMD_SET_AID. 2729 * CMD_ENABLE_SNIFFER.
2580 */ 2730 */
2581#define MWL8K_FRAME_PROT_DISABLED 0x00 2731struct mwl8k_cmd_enable_sniffer {
2582#define MWL8K_FRAME_PROT_11G 0x07 2732 struct mwl8k_cmd_pkt header;
2583#define MWL8K_FRAME_PROT_11N_HT_40MHZ_ONLY 0x02 2733 __le32 action;
2584#define MWL8K_FRAME_PROT_11N_HT_ALL 0x06
2585
2586struct mwl8k_cmd_update_set_aid {
2587 struct mwl8k_cmd_pkt header;
2588 __le16 aid;
2589
2590 /* AP's MAC address (BSSID) */
2591 __u8 bssid[ETH_ALEN];
2592 __le16 protection_mode;
2593 __u8 supp_rates[14];
2594} __attribute__((packed)); 2734} __attribute__((packed));
2595 2735
2596static int mwl8k_cmd_set_aid(struct ieee80211_hw *hw, 2736static int mwl8k_cmd_enable_sniffer(struct ieee80211_hw *hw, bool enable)
2597 struct ieee80211_vif *vif)
2598{ 2737{
2599 struct mwl8k_vif *mv_vif = MWL8K_VIF(vif); 2738 struct mwl8k_cmd_enable_sniffer *cmd;
2600 struct ieee80211_bss_conf *info = &mv_vif->bss_info;
2601 struct mwl8k_cmd_update_set_aid *cmd;
2602 u16 prot_mode;
2603 int rc; 2739 int rc;
2604 2740
2605 cmd = kzalloc(sizeof(*cmd), GFP_KERNEL); 2741 cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
2606 if (cmd == NULL) 2742 if (cmd == NULL)
2607 return -ENOMEM; 2743 return -ENOMEM;
2608 2744
2609 cmd->header.code = cpu_to_le16(MWL8K_CMD_SET_AID); 2745 cmd->header.code = cpu_to_le16(MWL8K_CMD_ENABLE_SNIFFER);
2610 cmd->header.length = cpu_to_le16(sizeof(*cmd)); 2746 cmd->header.length = cpu_to_le16(sizeof(*cmd));
2611 cmd->aid = cpu_to_le16(info->aid); 2747 cmd->action = cpu_to_le32(!!enable);
2612 2748
2613 memcpy(cmd->bssid, mv_vif->bssid, ETH_ALEN); 2749 rc = mwl8k_post_cmd(hw, &cmd->header);
2750 kfree(cmd);
2614 2751
2615 if (info->use_cts_prot) { 2752 return rc;
2616 prot_mode = MWL8K_FRAME_PROT_11G; 2753}
2754
2755/*
2756 * CMD_SET_MAC_ADDR.
2757 */
2758struct mwl8k_cmd_set_mac_addr {
2759 struct mwl8k_cmd_pkt header;
2760 union {
2761 struct {
2762 __le16 mac_type;
2763 __u8 mac_addr[ETH_ALEN];
2764 } mbss;
2765 __u8 mac_addr[ETH_ALEN];
2766 };
2767} __attribute__((packed));
2768
2769#define MWL8K_MAC_TYPE_PRIMARY_CLIENT 0
2770#define MWL8K_MAC_TYPE_SECONDARY_CLIENT 1
2771#define MWL8K_MAC_TYPE_PRIMARY_AP 2
2772#define MWL8K_MAC_TYPE_SECONDARY_AP 3
2773
2774static int mwl8k_cmd_set_mac_addr(struct ieee80211_hw *hw,
2775 struct ieee80211_vif *vif, u8 *mac)
2776{
2777 struct mwl8k_priv *priv = hw->priv;
2778 struct mwl8k_vif *mwl8k_vif = MWL8K_VIF(vif);
2779 struct mwl8k_cmd_set_mac_addr *cmd;
2780 int mac_type;
2781 int rc;
2782
2783 mac_type = MWL8K_MAC_TYPE_PRIMARY_AP;
2784 if (vif != NULL && vif->type == NL80211_IFTYPE_STATION) {
2785 if (mwl8k_vif->macid + 1 == ffs(priv->sta_macids_supported))
2786 mac_type = MWL8K_MAC_TYPE_PRIMARY_CLIENT;
2787 else
2788 mac_type = MWL8K_MAC_TYPE_SECONDARY_CLIENT;
2789 } else if (vif != NULL && vif->type == NL80211_IFTYPE_AP) {
2790 if (mwl8k_vif->macid + 1 == ffs(priv->ap_macids_supported))
2791 mac_type = MWL8K_MAC_TYPE_PRIMARY_AP;
2792 else
2793 mac_type = MWL8K_MAC_TYPE_SECONDARY_AP;
2794 }
2795
2796 cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
2797 if (cmd == NULL)
2798 return -ENOMEM;
2799
2800 cmd->header.code = cpu_to_le16(MWL8K_CMD_SET_MAC_ADDR);
2801 cmd->header.length = cpu_to_le16(sizeof(*cmd));
2802 if (priv->ap_fw) {
2803 cmd->mbss.mac_type = cpu_to_le16(mac_type);
2804 memcpy(cmd->mbss.mac_addr, mac, ETH_ALEN);
2617 } else { 2805 } else {
2618 switch (info->ht_operation_mode & 2806 memcpy(cmd->mac_addr, mac, ETH_ALEN);
2619 IEEE80211_HT_OP_MODE_PROTECTION) {
2620 case IEEE80211_HT_OP_MODE_PROTECTION_20MHZ:
2621 prot_mode = MWL8K_FRAME_PROT_11N_HT_40MHZ_ONLY;
2622 break;
2623 case IEEE80211_HT_OP_MODE_PROTECTION_NONHT_MIXED:
2624 prot_mode = MWL8K_FRAME_PROT_11N_HT_ALL;
2625 break;
2626 default:
2627 prot_mode = MWL8K_FRAME_PROT_DISABLED;
2628 break;
2629 }
2630 } 2807 }
2631 cmd->protection_mode = cpu_to_le16(prot_mode);
2632 2808
2633 memcpy(cmd->supp_rates, mwl8k_rateids, sizeof(mwl8k_rateids)); 2809 rc = mwl8k_post_pervif_cmd(hw, vif, &cmd->header);
2810 kfree(cmd);
2811
2812 return rc;
2813}
2814
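Note on the extended SET_MAC_ADDR handler above: it now distinguishes primary from secondary client/AP interfaces; the vif whose macid matches the lowest bit set in the corresponding "supported macids" bitmap is the primary one (ffs() returns a 1-based bit index, hence the "macid + 1" comparison). A standalone sketch of that classification for the station case (mac-type codes copied from the diff):

#include <stdint.h>
#include <stdio.h>

#define MAC_TYPE_PRIMARY_CLIENT   0
#define MAC_TYPE_SECONDARY_CLIENT 1

/* Pick primary vs. secondary for a station interface with the given macid. */
static int sta_mac_type(int macid, uint32_t sta_macids_supported)
{
    if (macid + 1 == __builtin_ffs(sta_macids_supported))
        return MAC_TYPE_PRIMARY_CLIENT;
    return MAC_TYPE_SECONDARY_CLIENT;
}

int main(void)
{
    uint32_t supported = 0x00000003;    /* macids 0 and 1 available */

    printf("macid 0 -> type %d\n", sta_mac_type(0, supported));    /* primary */
    printf("macid 1 -> type %d\n", sta_mac_type(1, supported));    /* secondary */
    return 0;
}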
2815/*
2816 * CMD_SET_RATEADAPT_MODE.
2817 */
2818struct mwl8k_cmd_set_rate_adapt_mode {
2819 struct mwl8k_cmd_pkt header;
2820 __le16 action;
2821 __le16 mode;
2822} __attribute__((packed));
2823
2824static int mwl8k_cmd_set_rateadapt_mode(struct ieee80211_hw *hw, __u16 mode)
2825{
2826 struct mwl8k_cmd_set_rate_adapt_mode *cmd;
2827 int rc;
2828
2829 cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
2830 if (cmd == NULL)
2831 return -ENOMEM;
2832
2833 cmd->header.code = cpu_to_le16(MWL8K_CMD_SET_RATEADAPT_MODE);
2834 cmd->header.length = cpu_to_le16(sizeof(*cmd));
2835 cmd->action = cpu_to_le16(MWL8K_CMD_SET);
2836 cmd->mode = cpu_to_le16(mode);
2634 2837
2635 rc = mwl8k_post_cmd(hw, &cmd->header); 2838 rc = mwl8k_post_cmd(hw, &cmd->header);
2636 kfree(cmd); 2839 kfree(cmd);
@@ -2639,115 +2842,255 @@ static int mwl8k_cmd_set_aid(struct ieee80211_hw *hw,
2639} 2842}
2640 2843
2641/* 2844/*
2642 * CMD_SET_RATE. 2845 * CMD_BSS_START.
2643 */ 2846 */
2644struct mwl8k_cmd_update_rateset { 2847struct mwl8k_cmd_bss_start {
2645 struct mwl8k_cmd_pkt header; 2848 struct mwl8k_cmd_pkt header;
2646 __u8 legacy_rates[14]; 2849 __le32 enable;
2647
2648 /* Bitmap for supported MCS codes. */
2649 __u8 mcs_set[16];
2650 __u8 reserved[16];
2651} __attribute__((packed)); 2850} __attribute__((packed));
2652 2851
2653static int mwl8k_update_rateset(struct ieee80211_hw *hw, 2852static int mwl8k_cmd_bss_start(struct ieee80211_hw *hw,
2654 struct ieee80211_vif *vif) 2853 struct ieee80211_vif *vif, int enable)
2655{ 2854{
2656 struct mwl8k_cmd_update_rateset *cmd; 2855 struct mwl8k_cmd_bss_start *cmd;
2657 int rc; 2856 int rc;
2658 2857
2659 cmd = kzalloc(sizeof(*cmd), GFP_KERNEL); 2858 cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
2660 if (cmd == NULL) 2859 if (cmd == NULL)
2661 return -ENOMEM; 2860 return -ENOMEM;
2662 2861
2663 cmd->header.code = cpu_to_le16(MWL8K_CMD_SET_RATE); 2862 cmd->header.code = cpu_to_le16(MWL8K_CMD_BSS_START);
2664 cmd->header.length = cpu_to_le16(sizeof(*cmd)); 2863 cmd->header.length = cpu_to_le16(sizeof(*cmd));
2665 memcpy(cmd->legacy_rates, mwl8k_rateids, sizeof(mwl8k_rateids)); 2864 cmd->enable = cpu_to_le32(enable);
2666 2865
2667 rc = mwl8k_post_cmd(hw, &cmd->header); 2866 rc = mwl8k_post_pervif_cmd(hw, vif, &cmd->header);
2668 kfree(cmd); 2867 kfree(cmd);
2669 2868
2670 return rc; 2869 return rc;
2671} 2870}
2672 2871
2673/* 2872/*
2674 * CMD_USE_FIXED_RATE. 2873 * CMD_SET_NEW_STN.
2675 */ 2874 */
2676#define MWL8K_RATE_TABLE_SIZE 8 2875struct mwl8k_cmd_set_new_stn {
2677#define MWL8K_UCAST_RATE 0 2876 struct mwl8k_cmd_pkt header;
2678#define MWL8K_USE_AUTO_RATE 0x0002 2877 __le16 aid;
2878 __u8 mac_addr[6];
2879 __le16 stn_id;
2880 __le16 action;
2881 __le16 rsvd;
2882 __le32 legacy_rates;
2883 __u8 ht_rates[4];
2884 __le16 cap_info;
2885 __le16 ht_capabilities_info;
2886 __u8 mac_ht_param_info;
2887 __u8 rev;
2888 __u8 control_channel;
2889 __u8 add_channel;
2890 __le16 op_mode;
2891 __le16 stbc;
2892 __u8 add_qos_info;
2893 __u8 is_qos_sta;
2894 __le32 fw_sta_ptr;
2895} __attribute__((packed));
2679 2896
2680struct mwl8k_rate_entry { 2897#define MWL8K_STA_ACTION_ADD 0
2681 /* Set to 1 if HT rate, 0 if legacy. */ 2898#define MWL8K_STA_ACTION_REMOVE 2
2682 __le32 is_ht_rate;
2683 2899
2684 /* Set to 1 to use retry_count field. */ 2900static int mwl8k_cmd_set_new_stn_add(struct ieee80211_hw *hw,
2685 __le32 enable_retry; 2901 struct ieee80211_vif *vif,
2902 struct ieee80211_sta *sta)
2903{
2904 struct mwl8k_cmd_set_new_stn *cmd;
2905 u32 rates;
2906 int rc;
2686 2907
2687 /* Specified legacy rate or MCS. */ 2908 cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
2688 __le32 rate; 2909 if (cmd == NULL)
2910 return -ENOMEM;
2689 2911
2690 /* Number of allowed retries. */ 2912 cmd->header.code = cpu_to_le16(MWL8K_CMD_SET_NEW_STN);
2691 __le32 retry_count; 2913 cmd->header.length = cpu_to_le16(sizeof(*cmd));
2914 cmd->aid = cpu_to_le16(sta->aid);
2915 memcpy(cmd->mac_addr, sta->addr, ETH_ALEN);
2916 cmd->stn_id = cpu_to_le16(sta->aid);
2917 cmd->action = cpu_to_le16(MWL8K_STA_ACTION_ADD);
2918 if (hw->conf.channel->band == IEEE80211_BAND_2GHZ)
2919 rates = sta->supp_rates[IEEE80211_BAND_2GHZ];
2920 else
2921 rates = sta->supp_rates[IEEE80211_BAND_5GHZ] << 5;
2922 cmd->legacy_rates = cpu_to_le32(rates);
2923 if (sta->ht_cap.ht_supported) {
2924 cmd->ht_rates[0] = sta->ht_cap.mcs.rx_mask[0];
2925 cmd->ht_rates[1] = sta->ht_cap.mcs.rx_mask[1];
2926 cmd->ht_rates[2] = sta->ht_cap.mcs.rx_mask[2];
2927 cmd->ht_rates[3] = sta->ht_cap.mcs.rx_mask[3];
2928 cmd->ht_capabilities_info = cpu_to_le16(sta->ht_cap.cap);
2929 cmd->mac_ht_param_info = (sta->ht_cap.ampdu_factor & 3) |
2930 ((sta->ht_cap.ampdu_density & 7) << 2);
2931 cmd->is_qos_sta = 1;
2932 }
2933
2934 rc = mwl8k_post_pervif_cmd(hw, vif, &cmd->header);
2935 kfree(cmd);
2936
2937 return rc;
2938}
2939
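Note on mwl8k_cmd_set_new_stn_add() above: mac80211's per-band supp_rates bitmap is folded into the firmware's single legacy-rate bitmap, with 5 GHz rate bits shifted left by five; reading the code, this lines the OFDM rates up behind the 2.4 GHz-only entries at the start of the combined rate table (that interpretation is an inference, not stated in the patch). A sketch of the conversion:

#include <stdint.h>
#include <stdio.h>

enum band { BAND_2GHZ, BAND_5GHZ };

/* Map a per-band supported-rates bitmap onto the combined firmware bitmap. */
static uint32_t fw_legacy_rates(enum band band, uint32_t supp_rates)
{
    if (band == BAND_2GHZ)
        return supp_rates;
    /* 5 GHz rates start after the 2.4 GHz-only entries. */
    return supp_rates << 5;
}

int main(void)
{
    /* e.g. the three lowest 5 GHz rates reported by mac80211 as bits 0..2 */
    printf("5 GHz   0x007 -> firmware 0x%x\n", (unsigned)fw_legacy_rates(BAND_5GHZ, 0x7));
    printf("2.4 GHz 0xfff -> firmware 0x%x\n", (unsigned)fw_legacy_rates(BAND_2GHZ, 0xfff));
    return 0;
}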
2940static int mwl8k_cmd_set_new_stn_add_self(struct ieee80211_hw *hw,
2941 struct ieee80211_vif *vif)
2942{
2943 struct mwl8k_cmd_set_new_stn *cmd;
2944 int rc;
2945
2946 cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
2947 if (cmd == NULL)
2948 return -ENOMEM;
2949
2950 cmd->header.code = cpu_to_le16(MWL8K_CMD_SET_NEW_STN);
2951 cmd->header.length = cpu_to_le16(sizeof(*cmd));
2952 memcpy(cmd->mac_addr, vif->addr, ETH_ALEN);
2953
2954 rc = mwl8k_post_pervif_cmd(hw, vif, &cmd->header);
2955 kfree(cmd);
2956
2957 return rc;
2958}
2959
2960static int mwl8k_cmd_set_new_stn_del(struct ieee80211_hw *hw,
2961 struct ieee80211_vif *vif, u8 *addr)
2962{
2963 struct mwl8k_cmd_set_new_stn *cmd;
2964 int rc;
2965
2966 cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
2967 if (cmd == NULL)
2968 return -ENOMEM;
2969
2970 cmd->header.code = cpu_to_le16(MWL8K_CMD_SET_NEW_STN);
2971 cmd->header.length = cpu_to_le16(sizeof(*cmd));
2972 memcpy(cmd->mac_addr, addr, ETH_ALEN);
2973 cmd->action = cpu_to_le16(MWL8K_STA_ACTION_REMOVE);
2974
2975 rc = mwl8k_post_pervif_cmd(hw, vif, &cmd->header);
2976 kfree(cmd);
2977
2978 return rc;
2979}
2980
2981/*
2982 * CMD_UPDATE_STADB.
2983 */
2984struct ewc_ht_info {
2985 __le16 control1;
2986 __le16 control2;
2987 __le16 control3;
2692} __attribute__((packed)); 2988} __attribute__((packed));
2693 2989
2694struct mwl8k_rate_table { 2990struct peer_capability_info {
2695 /* 1 to allow specified rate and below */ 2991 /* Peer type - AP vs. STA. */
2696 __le32 allow_rate_drop; 2992 __u8 peer_type;
2697 __le32 num_rates; 2993
2698 struct mwl8k_rate_entry rate_entry[MWL8K_RATE_TABLE_SIZE]; 2994 /* Basic 802.11 capabilities from assoc resp. */
2995 __le16 basic_caps;
2996
2997 /* Set if peer supports 802.11n high throughput (HT). */
2998 __u8 ht_support;
2999
3000 /* Valid if HT is supported. */
3001 __le16 ht_caps;
3002 __u8 extended_ht_caps;
3003 struct ewc_ht_info ewc_info;
3004
3005 /* Legacy rate table. Intersection of our rates and peer rates. */
3006 __u8 legacy_rates[12];
3007
3008 /* HT rate table. Intersection of our rates and peer rates. */
3009 __u8 ht_rates[16];
3010 __u8 pad[16];
3011
3012 /* If set, interoperability mode, no proprietary extensions. */
3013 __u8 interop;
3014 __u8 pad2;
3015 __u8 station_id;
3016 __le16 amsdu_enabled;
2699} __attribute__((packed)); 3017} __attribute__((packed));
2700 3018
2701struct mwl8k_cmd_use_fixed_rate { 3019struct mwl8k_cmd_update_stadb {
2702 struct mwl8k_cmd_pkt header; 3020 struct mwl8k_cmd_pkt header;
3021
3022 /* See STADB_ACTION_TYPE */
2703 __le32 action; 3023 __le32 action;
2704 struct mwl8k_rate_table rate_table;
2705 3024
2706 /* Unicast, Broadcast or Multicast */ 3025 /* Peer MAC address */
2707 __le32 rate_type; 3026 __u8 peer_addr[ETH_ALEN];
2708 __le32 reserved1; 3027
2709 __le32 reserved2; 3028 __le32 reserved;
3029
3030 /* Peer info - valid during add/update. */
3031 struct peer_capability_info peer_info;
2710} __attribute__((packed)); 3032} __attribute__((packed));
2711 3033
2712static int mwl8k_cmd_use_fixed_rate(struct ieee80211_hw *hw, 3034#define MWL8K_STA_DB_MODIFY_ENTRY 1
2713 u32 action, u32 rate_type, struct mwl8k_rate_table *rate_table) 3035#define MWL8K_STA_DB_DEL_ENTRY 2
3036
3037/* Peer Entry flags - used to define the type of the peer node */
3038#define MWL8K_PEER_TYPE_ACCESSPOINT 2
3039
3040static int mwl8k_cmd_update_stadb_add(struct ieee80211_hw *hw,
3041 struct ieee80211_vif *vif,
3042 struct ieee80211_sta *sta)
2714{ 3043{
2715 struct mwl8k_cmd_use_fixed_rate *cmd; 3044 struct mwl8k_cmd_update_stadb *cmd;
2716 int count; 3045 struct peer_capability_info *p;
3046 u32 rates;
2717 int rc; 3047 int rc;
2718 3048
2719 cmd = kzalloc(sizeof(*cmd), GFP_KERNEL); 3049 cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
2720 if (cmd == NULL) 3050 if (cmd == NULL)
2721 return -ENOMEM; 3051 return -ENOMEM;
2722 3052
2723 cmd->header.code = cpu_to_le16(MWL8K_CMD_USE_FIXED_RATE); 3053 cmd->header.code = cpu_to_le16(MWL8K_CMD_UPDATE_STADB);
2724 cmd->header.length = cpu_to_le16(sizeof(*cmd)); 3054 cmd->header.length = cpu_to_le16(sizeof(*cmd));
3055 cmd->action = cpu_to_le32(MWL8K_STA_DB_MODIFY_ENTRY);
3056 memcpy(cmd->peer_addr, sta->addr, ETH_ALEN);
3057
3058 p = &cmd->peer_info;
3059 p->peer_type = MWL8K_PEER_TYPE_ACCESSPOINT;
3060 p->basic_caps = cpu_to_le16(vif->bss_conf.assoc_capability);
3061 p->ht_support = sta->ht_cap.ht_supported;
3062 p->ht_caps = sta->ht_cap.cap;
3063 p->extended_ht_caps = (sta->ht_cap.ampdu_factor & 3) |
3064 ((sta->ht_cap.ampdu_density & 7) << 2);
3065 if (hw->conf.channel->band == IEEE80211_BAND_2GHZ)
3066 rates = sta->supp_rates[IEEE80211_BAND_2GHZ];
3067 else
3068 rates = sta->supp_rates[IEEE80211_BAND_5GHZ] << 5;
3069 legacy_rate_mask_to_array(p->legacy_rates, rates);
3070 memcpy(p->ht_rates, sta->ht_cap.mcs.rx_mask, 16);
3071 p->interop = 1;
3072 p->amsdu_enabled = 0;
2725 3073
2726 cmd->action = cpu_to_le32(action); 3074 rc = mwl8k_post_cmd(hw, &cmd->header);
2727 cmd->rate_type = cpu_to_le32(rate_type); 3075 kfree(cmd);
2728 3076
2729 if (rate_table != NULL) { 3077 return rc ? rc : p->station_id;
2730 /* 3078}
2731 * Copy over each field manually so that endian 3079
2732 * conversion can be done. 3080static int mwl8k_cmd_update_stadb_del(struct ieee80211_hw *hw,
2733 */ 3081 struct ieee80211_vif *vif, u8 *addr)
2734 cmd->rate_table.allow_rate_drop = 3082{
2735 cpu_to_le32(rate_table->allow_rate_drop); 3083 struct mwl8k_cmd_update_stadb *cmd;
2736 cmd->rate_table.num_rates = 3084 int rc;
2737 cpu_to_le32(rate_table->num_rates); 3085
2738 3086 cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
2739 for (count = 0; count < rate_table->num_rates; count++) { 3087 if (cmd == NULL)
2740 struct mwl8k_rate_entry *dst = 3088 return -ENOMEM;
2741 &cmd->rate_table.rate_entry[count]; 3089
2742 struct mwl8k_rate_entry *src = 3090 cmd->header.code = cpu_to_le16(MWL8K_CMD_UPDATE_STADB);
2743 &rate_table->rate_entry[count]; 3091 cmd->header.length = cpu_to_le16(sizeof(*cmd));
2744 3092 cmd->action = cpu_to_le32(MWL8K_STA_DB_DEL_ENTRY);
2745 dst->is_ht_rate = cpu_to_le32(src->is_ht_rate); 3093 memcpy(cmd->peer_addr, addr, ETH_ALEN);
2746 dst->enable_retry = cpu_to_le32(src->enable_retry);
2747 dst->rate = cpu_to_le32(src->rate);
2748 dst->retry_count = cpu_to_le32(src->retry_count);
2749 }
2750 }
2751 3094
2752 rc = mwl8k_post_cmd(hw, &cmd->header); 3095 rc = mwl8k_post_cmd(hw, &cmd->header);
2753 kfree(cmd); 3096 kfree(cmd);
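Note on the new UPDATE_STADB helpers above: the peer's A-MPDU parameters are packed into one byte (factor in bits 0-1, density in bits 2-4), and on success the add path returns the firmware-assigned station id instead of zero ("rc ? rc : p->station_id"), so callers can tell a negative error code from a valid id. A standalone sketch of the packing and the return convention:

#include <stdint.h>
#include <stdio.h>

/* Pack A-MPDU factor (2 bits) and density (3 bits) as the driver does. */
static uint8_t pack_ht_param(uint8_t ampdu_factor, uint8_t ampdu_density)
{
    return (ampdu_factor & 3) | ((ampdu_density & 7) << 2);
}

/* Return a negative error, or a non-negative station id on success. */
static int stadb_add(int post_result, uint8_t station_id)
{
    return post_result ? post_result : station_id;
}

int main(void)
{
    printf("factor 3, density 5 -> 0x%02x\n", pack_ht_param(3, 5));    /* 0x17 */
    printf("success, id 4       -> %d\n", stadb_add(0, 4));
    printf("post failed (-5)    -> %d\n", stadb_add(-5, 4));
    return 0;
}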
@@ -2766,19 +3109,22 @@ static irqreturn_t mwl8k_interrupt(int irq, void *dev_id)
2766 u32 status; 3109 u32 status;
2767 3110
2768 status = ioread32(priv->regs + MWL8K_HIU_A2H_INTERRUPT_STATUS); 3111 status = ioread32(priv->regs + MWL8K_HIU_A2H_INTERRUPT_STATUS);
2769 iowrite32(~status, priv->regs + MWL8K_HIU_A2H_INTERRUPT_STATUS);
2770
2771 if (!status) 3112 if (!status)
2772 return IRQ_NONE; 3113 return IRQ_NONE;
2773 3114
2774 if (status & MWL8K_A2H_INT_TX_DONE) 3115 if (status & MWL8K_A2H_INT_TX_DONE) {
2775 tasklet_schedule(&priv->tx_reclaim_task); 3116 status &= ~MWL8K_A2H_INT_TX_DONE;
3117 tasklet_schedule(&priv->poll_tx_task);
3118 }
2776 3119
2777 if (status & MWL8K_A2H_INT_RX_READY) { 3120 if (status & MWL8K_A2H_INT_RX_READY) {
2778 while (rxq_process(hw, 0, 1)) 3121 status &= ~MWL8K_A2H_INT_RX_READY;
2779 rxq_refill(hw, 0, 1); 3122 tasklet_schedule(&priv->poll_rx_task);
2780 } 3123 }
2781 3124
3125 if (status)
3126 iowrite32(~status, priv->regs + MWL8K_HIU_A2H_INTERRUPT_STATUS);
3127
2782 if (status & MWL8K_A2H_INT_OPC_DONE) { 3128 if (status & MWL8K_A2H_INT_OPC_DONE) {
2783 if (priv->hostcmd_wait != NULL) 3129 if (priv->hostcmd_wait != NULL)
2784 complete(priv->hostcmd_wait); 3130 complete(priv->hostcmd_wait);
@@ -2793,6 +3139,53 @@ static irqreturn_t mwl8k_interrupt(int irq, void *dev_id)
2793 return IRQ_HANDLED; 3139 return IRQ_HANDLED;
2794} 3140}
2795 3141
3142static void mwl8k_tx_poll(unsigned long data)
3143{
3144 struct ieee80211_hw *hw = (struct ieee80211_hw *)data;
3145 struct mwl8k_priv *priv = hw->priv;
3146 int limit;
3147 int i;
3148
3149 limit = 32;
3150
3151 spin_lock_bh(&priv->tx_lock);
3152
3153 for (i = 0; i < MWL8K_TX_QUEUES; i++)
3154 limit -= mwl8k_txq_reclaim(hw, i, limit, 0);
3155
3156 if (!priv->pending_tx_pkts && priv->tx_wait != NULL) {
3157 complete(priv->tx_wait);
3158 priv->tx_wait = NULL;
3159 }
3160
3161 spin_unlock_bh(&priv->tx_lock);
3162
3163 if (limit) {
3164 writel(~MWL8K_A2H_INT_TX_DONE,
3165 priv->regs + MWL8K_HIU_A2H_INTERRUPT_STATUS);
3166 } else {
3167 tasklet_schedule(&priv->poll_tx_task);
3168 }
3169}
3170
3171static void mwl8k_rx_poll(unsigned long data)
3172{
3173 struct ieee80211_hw *hw = (struct ieee80211_hw *)data;
3174 struct mwl8k_priv *priv = hw->priv;
3175 int limit;
3176
3177 limit = 32;
3178 limit -= rxq_process(hw, 0, limit);
3179 limit -= rxq_refill(hw, 0, limit);
3180
3181 if (limit) {
3182 writel(~MWL8K_A2H_INT_RX_READY,
3183 priv->regs + MWL8K_HIU_A2H_INTERRUPT_STATUS);
3184 } else {
3185 tasklet_schedule(&priv->poll_rx_task);
3186 }
3187}
3188
2796 3189
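Note on mwl8k_tx_poll() and mwl8k_rx_poll() above: together with the interrupt-handler hunk earlier (which no longer acknowledges TX_DONE and RX_READY itself), they implement a small NAPI-style budget. Each tasklet run processes at most 32 descriptors, acknowledges its interrupt source only if the budget was not exhausted, and otherwise reschedules itself. A standalone model of that control flow, with the queue and register writes simulated:

#include <stdbool.h>
#include <stdio.h>

static int pending;              /* simulated backlog of completed descriptors */
static bool irq_acked;           /* stands in for the interrupt-status ack write */
static bool tasklet_scheduled;

/* Reclaim up to 'limit' items; return how many were actually reclaimed. */
static int reclaim(int limit)
{
    int done = (pending < limit) ? pending : limit;

    pending -= done;
    return done;
}

static void poll(void)
{
    int limit = 32;

    limit -= reclaim(limit);

    if (limit) {
        /* Budget left over: backlog is drained, acknowledge the source. */
        irq_acked = true;
    } else {
        /* Budget exhausted: leave it pending and run again soon. */
        tasklet_scheduled = true;
    }
}

int main(void)
{
    pending = 70;
    while (pending || tasklet_scheduled) {
        tasklet_scheduled = false;
        poll();
        printf("pending=%d acked=%d resched=%d\n",
               pending, irq_acked, tasklet_scheduled);
    }
    return 0;
}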
2797/* 3190/*
2798 * Core driver operations. 3191 * Core driver operations.
@@ -2803,7 +3196,7 @@ static int mwl8k_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
2803 int index = skb_get_queue_mapping(skb); 3196 int index = skb_get_queue_mapping(skb);
2804 int rc; 3197 int rc;
2805 3198
2806 if (priv->current_channel == NULL) { 3199 if (!priv->radio_on) {
2807 printk(KERN_DEBUG "%s: dropped TX frame since radio " 3200 printk(KERN_DEBUG "%s: dropped TX frame since radio "
2808 "disabled\n", wiphy_name(hw->wiphy)); 3201 "disabled\n", wiphy_name(hw->wiphy));
2809 dev_kfree_skb(skb); 3202 dev_kfree_skb(skb);
@@ -2828,19 +3221,20 @@ static int mwl8k_start(struct ieee80211_hw *hw)
2828 return -EIO; 3221 return -EIO;
2829 } 3222 }
2830 3223
2831 /* Enable tx reclaim tasklet */ 3224 /* Enable TX reclaim and RX tasklets. */
2832 tasklet_enable(&priv->tx_reclaim_task); 3225 tasklet_enable(&priv->poll_tx_task);
3226 tasklet_enable(&priv->poll_rx_task);
2833 3227
2834 /* Enable interrupts */ 3228 /* Enable interrupts */
2835 iowrite32(MWL8K_A2H_EVENTS, priv->regs + MWL8K_HIU_A2H_INTERRUPT_MASK); 3229 iowrite32(MWL8K_A2H_EVENTS, priv->regs + MWL8K_HIU_A2H_INTERRUPT_MASK);
2836 3230
2837 rc = mwl8k_fw_lock(hw); 3231 rc = mwl8k_fw_lock(hw);
2838 if (!rc) { 3232 if (!rc) {
2839 rc = mwl8k_cmd_802_11_radio_enable(hw); 3233 rc = mwl8k_cmd_radio_enable(hw);
2840 3234
2841 if (!priv->ap_fw) { 3235 if (!priv->ap_fw) {
2842 if (!rc) 3236 if (!rc)
2843 rc = mwl8k_enable_sniffer(hw, 0); 3237 rc = mwl8k_cmd_enable_sniffer(hw, 0);
2844 3238
2845 if (!rc) 3239 if (!rc)
2846 rc = mwl8k_cmd_set_pre_scan(hw); 3240 rc = mwl8k_cmd_set_pre_scan(hw);
@@ -2851,10 +3245,10 @@ static int mwl8k_start(struct ieee80211_hw *hw)
2851 } 3245 }
2852 3246
2853 if (!rc) 3247 if (!rc)
2854 rc = mwl8k_cmd_setrateadaptmode(hw, 0); 3248 rc = mwl8k_cmd_set_rateadapt_mode(hw, 0);
2855 3249
2856 if (!rc) 3250 if (!rc)
2857 rc = mwl8k_set_wmm(hw, 0); 3251 rc = mwl8k_cmd_set_wmm_mode(hw, 0);
2858 3252
2859 mwl8k_fw_unlock(hw); 3253 mwl8k_fw_unlock(hw);
2860 } 3254 }
@@ -2862,7 +3256,8 @@ static int mwl8k_start(struct ieee80211_hw *hw)
2862 if (rc) { 3256 if (rc) {
2863 iowrite32(0, priv->regs + MWL8K_HIU_A2H_INTERRUPT_MASK); 3257 iowrite32(0, priv->regs + MWL8K_HIU_A2H_INTERRUPT_MASK);
2864 free_irq(priv->pdev->irq, hw); 3258 free_irq(priv->pdev->irq, hw);
2865 tasklet_disable(&priv->tx_reclaim_task); 3259 tasklet_disable(&priv->poll_tx_task);
3260 tasklet_disable(&priv->poll_rx_task);
2866 } 3261 }
2867 3262
2868 return rc; 3263 return rc;
@@ -2873,7 +3268,7 @@ static void mwl8k_stop(struct ieee80211_hw *hw)
2873 struct mwl8k_priv *priv = hw->priv; 3268 struct mwl8k_priv *priv = hw->priv;
2874 int i; 3269 int i;
2875 3270
2876 mwl8k_cmd_802_11_radio_disable(hw); 3271 mwl8k_cmd_radio_disable(hw);
2877 3272
2878 ieee80211_stop_queues(hw); 3273 ieee80211_stop_queues(hw);
2879 3274
@@ -2886,36 +3281,27 @@ static void mwl8k_stop(struct ieee80211_hw *hw)
2886 if (priv->beacon_skb != NULL) 3281 if (priv->beacon_skb != NULL)
2887 dev_kfree_skb(priv->beacon_skb); 3282 dev_kfree_skb(priv->beacon_skb);
2888 3283
2889 /* Stop tx reclaim tasklet */ 3284 /* Stop TX reclaim and RX tasklets. */
2890 tasklet_disable(&priv->tx_reclaim_task); 3285 tasklet_disable(&priv->poll_tx_task);
3286 tasklet_disable(&priv->poll_rx_task);
2891 3287
2892 /* Return all skbs to mac80211 */ 3288 /* Return all skbs to mac80211 */
2893 for (i = 0; i < MWL8K_TX_QUEUES; i++) 3289 for (i = 0; i < MWL8K_TX_QUEUES; i++)
2894 mwl8k_txq_reclaim(hw, i, 1); 3290 mwl8k_txq_reclaim(hw, i, INT_MAX, 1);
2895} 3291}
2896 3292
2897static int mwl8k_add_interface(struct ieee80211_hw *hw, 3293static int mwl8k_add_interface(struct ieee80211_hw *hw,
2898 struct ieee80211_if_init_conf *conf) 3294 struct ieee80211_vif *vif)
2899{ 3295{
2900 struct mwl8k_priv *priv = hw->priv; 3296 struct mwl8k_priv *priv = hw->priv;
2901 struct mwl8k_vif *mwl8k_vif; 3297 struct mwl8k_vif *mwl8k_vif;
2902 3298 u32 macids_supported;
2903 /* 3299 int macid;
2904 * We only support one active interface at a time.
2905 */
2906 if (priv->vif != NULL)
2907 return -EBUSY;
2908
2909 /*
2910 * We only support managed interfaces for now.
2911 */
2912 if (conf->type != NL80211_IFTYPE_STATION)
2913 return -EINVAL;
2914 3300
2915 /* 3301 /*
2916 * Reject interface creation if sniffer mode is active, as 3302 * Reject interface creation if sniffer mode is active, as
2917 * STA operation is mutually exclusive with hardware sniffer 3303 * STA operation is mutually exclusive with hardware sniffer
2918 * mode. 3304 * mode. (Sniffer mode is only used on STA firmware.)
2919 */ 3305 */
2920 if (priv->sniffer_enabled) { 3306 if (priv->sniffer_enabled) {
2921 printk(KERN_INFO "%s: unable to create STA " 3307 printk(KERN_INFO "%s: unable to create STA "
@@ -2924,37 +3310,54 @@ static int mwl8k_add_interface(struct ieee80211_hw *hw,
2924 return -EINVAL; 3310 return -EINVAL;
2925 } 3311 }
2926 3312
2927 /* Clean out driver private area */
2928 mwl8k_vif = MWL8K_VIF(conf->vif);
2929 memset(mwl8k_vif, 0, sizeof(*mwl8k_vif));
2930 3313
2931 /* Set and save the mac address */ 3314 switch (vif->type) {
2932 mwl8k_set_mac_addr(hw, conf->mac_addr); 3315 case NL80211_IFTYPE_AP:
2933 memcpy(mwl8k_vif->mac_addr, conf->mac_addr, ETH_ALEN); 3316 macids_supported = priv->ap_macids_supported;
3317 break;
3318 case NL80211_IFTYPE_STATION:
3319 macids_supported = priv->sta_macids_supported;
3320 break;
3321 default:
3322 return -EINVAL;
3323 }
2934 3324
2935 /* Back pointer to parent config block */ 3325 macid = ffs(macids_supported & ~priv->macids_used);
2936 mwl8k_vif->priv = priv; 3326 if (!macid--)
3327 return -EBUSY;
2937 3328
2938 /* Set Initial sequence number to zero */ 3329 /* Setup driver private area. */
3330 mwl8k_vif = MWL8K_VIF(vif);
3331 memset(mwl8k_vif, 0, sizeof(*mwl8k_vif));
3332 mwl8k_vif->vif = vif;
3333 mwl8k_vif->macid = macid;
2939 mwl8k_vif->seqno = 0; 3334 mwl8k_vif->seqno = 0;
2940 3335
2941 priv->vif = conf->vif; 3336 /* Set the mac address. */
2942 priv->current_channel = NULL; 3337 mwl8k_cmd_set_mac_addr(hw, vif, vif->addr);
3338
3339 if (priv->ap_fw)
3340 mwl8k_cmd_set_new_stn_add_self(hw, vif);
3341
3342 priv->macids_used |= 1 << mwl8k_vif->macid;
3343 list_add_tail(&mwl8k_vif->list, &priv->vif_list);
2943 3344
2944 return 0; 3345 return 0;
2945} 3346}
2946 3347
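Note on the reworked mwl8k_add_interface() above: it now supports multiple interfaces by allocating a mac id from the firmware's supported-macids bitmap minus the ids already in use; ffs() returns 0 when no bit is free, so "if (!macid--)" both detects exhaustion and converts the 1-based bit index to a 0-based id in one step. A standalone sketch of the allocator:

#include <stdint.h>
#include <stdio.h>

/* Returns a free mac id (0-based) or -1 if the bitmap is exhausted. */
static int alloc_macid(uint32_t supported, uint32_t *used)
{
    int macid = __builtin_ffs(supported & ~*used);

    if (!macid--)
        return -1;

    *used |= 1u << macid;
    return macid;
}

int main(void)
{
    uint32_t supported = 0x00000003;    /* two mac ids offered by the firmware */
    uint32_t used = 0;
    int i;

    for (i = 0; i < 3; i++)
        printf("allocation %d -> %d\n", i, alloc_macid(supported, &used));
    /* prints 0, 1, then -1 once both ids are taken */
    return 0;
}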
2947static void mwl8k_remove_interface(struct ieee80211_hw *hw, 3348static void mwl8k_remove_interface(struct ieee80211_hw *hw,
2948 struct ieee80211_if_init_conf *conf) 3349 struct ieee80211_vif *vif)
2949{ 3350{
2950 struct mwl8k_priv *priv = hw->priv; 3351 struct mwl8k_priv *priv = hw->priv;
3352 struct mwl8k_vif *mwl8k_vif = MWL8K_VIF(vif);
2951 3353
2952 if (priv->vif == NULL) 3354 if (priv->ap_fw)
2953 return; 3355 mwl8k_cmd_set_new_stn_del(hw, vif, vif->addr);
2954 3356
2955 mwl8k_set_mac_addr(hw, "\x00\x00\x00\x00\x00\x00"); 3357 mwl8k_cmd_set_mac_addr(hw, vif, "\x00\x00\x00\x00\x00\x00");
2956 3358
2957 priv->vif = NULL; 3359 priv->macids_used &= ~(1 << mwl8k_vif->macid);
3360 list_del(&mwl8k_vif->list);
2958} 3361}
2959 3362
2960static int mwl8k_config(struct ieee80211_hw *hw, u32 changed) 3363static int mwl8k_config(struct ieee80211_hw *hw, u32 changed)
@@ -2964,8 +3367,7 @@ static int mwl8k_config(struct ieee80211_hw *hw, u32 changed)
2964 int rc; 3367 int rc;
2965 3368
2966 if (conf->flags & IEEE80211_CONF_IDLE) { 3369 if (conf->flags & IEEE80211_CONF_IDLE) {
2967 mwl8k_cmd_802_11_radio_disable(hw); 3370 mwl8k_cmd_radio_disable(hw);
2968 priv->current_channel = NULL;
2969 return 0; 3371 return 0;
2970 } 3372 }
2971 3373
@@ -2973,19 +3375,17 @@ static int mwl8k_config(struct ieee80211_hw *hw, u32 changed)
2973 if (rc) 3375 if (rc)
2974 return rc; 3376 return rc;
2975 3377
2976 rc = mwl8k_cmd_802_11_radio_enable(hw); 3378 rc = mwl8k_cmd_radio_enable(hw);
2977 if (rc) 3379 if (rc)
2978 goto out; 3380 goto out;
2979 3381
2980 rc = mwl8k_cmd_set_rf_channel(hw, conf->channel); 3382 rc = mwl8k_cmd_set_rf_channel(hw, conf);
2981 if (rc) 3383 if (rc)
2982 goto out; 3384 goto out;
2983 3385
2984 priv->current_channel = conf->channel;
2985
2986 if (conf->power_level > 18) 3386 if (conf->power_level > 18)
2987 conf->power_level = 18; 3387 conf->power_level = 18;
2988 rc = mwl8k_cmd_802_11_rf_tx_power(hw, conf->power_level); 3388 rc = mwl8k_cmd_rf_tx_power(hw, conf->power_level);
2989 if (rc) 3389 if (rc)
2990 goto out; 3390 goto out;
2991 3391
@@ -3003,79 +3403,160 @@ out:
3003 return rc; 3403 return rc;
3004} 3404}
3005 3405
3006static void mwl8k_bss_info_changed(struct ieee80211_hw *hw, 3406static void
3007 struct ieee80211_vif *vif, 3407mwl8k_bss_info_changed_sta(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
3008 struct ieee80211_bss_conf *info, 3408 struct ieee80211_bss_conf *info, u32 changed)
3009 u32 changed)
3010{ 3409{
3011 struct mwl8k_priv *priv = hw->priv; 3410 struct mwl8k_priv *priv = hw->priv;
3012 struct mwl8k_vif *mwl8k_vif = MWL8K_VIF(vif); 3411 u32 ap_legacy_rates;
3412 u8 ap_mcs_rates[16];
3013 int rc; 3413 int rc;
3014 3414
3015 if ((changed & BSS_CHANGED_ASSOC) == 0) 3415 if (mwl8k_fw_lock(hw))
3016 return; 3416 return;
3017 3417
3018 priv->capture_beacon = false; 3418 /*
3019 3419 * No need to capture a beacon if we're no longer associated.
3020 rc = mwl8k_fw_lock(hw); 3420 */
3021 if (rc) 3421 if ((changed & BSS_CHANGED_ASSOC) && !vif->bss_conf.assoc)
3022 return; 3422 priv->capture_beacon = false;
3023 3423
3024 if (info->assoc) { 3424 /*
3025 memcpy(&mwl8k_vif->bss_info, info, 3425 * Get the AP's legacy and MCS rates.
3026 sizeof(struct ieee80211_bss_conf)); 3426 */
3427 if (vif->bss_conf.assoc) {
3428 struct ieee80211_sta *ap;
3027 3429
3028 memcpy(mwl8k_vif->bssid, info->bssid, ETH_ALEN); 3430 rcu_read_lock();
3029 3431
3030 /* Install rates */ 3432 ap = ieee80211_find_sta(vif, vif->bss_conf.bssid);
3031 rc = mwl8k_update_rateset(hw, vif); 3433 if (ap == NULL) {
3032 if (rc) 3434 rcu_read_unlock();
3033 goto out; 3435 goto out;
3436 }
3437
3438 if (hw->conf.channel->band == IEEE80211_BAND_2GHZ) {
3439 ap_legacy_rates = ap->supp_rates[IEEE80211_BAND_2GHZ];
3440 } else {
3441 ap_legacy_rates =
3442 ap->supp_rates[IEEE80211_BAND_5GHZ] << 5;
3443 }
3444 memcpy(ap_mcs_rates, ap->ht_cap.mcs.rx_mask, 16);
3445
3446 rcu_read_unlock();
3447 }
3034 3448
3035 /* Turn on rate adaptation */ 3449 if ((changed & BSS_CHANGED_ASSOC) && vif->bss_conf.assoc) {
3036 rc = mwl8k_cmd_use_fixed_rate(hw, MWL8K_USE_AUTO_RATE, 3450 rc = mwl8k_cmd_set_rate(hw, vif, ap_legacy_rates, ap_mcs_rates);
3037 MWL8K_UCAST_RATE, NULL);
3038 if (rc) 3451 if (rc)
3039 goto out; 3452 goto out;
3040 3453
3041 /* Set radio preamble */ 3454 rc = mwl8k_cmd_use_fixed_rate_sta(hw);
3042 rc = mwl8k_set_radio_preamble(hw, info->use_short_preamble);
3043 if (rc) 3455 if (rc)
3044 goto out; 3456 goto out;
3457 }
3045 3458
3046 /* Set slot time */ 3459 if (changed & BSS_CHANGED_ERP_PREAMBLE) {
3047 rc = mwl8k_cmd_set_slot(hw, info->use_short_slot); 3460 rc = mwl8k_set_radio_preamble(hw,
3461 vif->bss_conf.use_short_preamble);
3048 if (rc) 3462 if (rc)
3049 goto out; 3463 goto out;
3464 }
3050 3465
3051 /* Update peer rate info */ 3466 if (changed & BSS_CHANGED_ERP_SLOT) {
3052 rc = mwl8k_cmd_update_sta_db(hw, vif, 3467 rc = mwl8k_cmd_set_slot(hw, vif->bss_conf.use_short_slot);
3053 MWL8K_STA_DB_MODIFY_ENTRY);
3054 if (rc) 3468 if (rc)
3055 goto out; 3469 goto out;
3470 }
3056 3471
3057 /* Set AID */ 3472 if (vif->bss_conf.assoc &&
3058 rc = mwl8k_cmd_set_aid(hw, vif); 3473 (changed & (BSS_CHANGED_ASSOC | BSS_CHANGED_ERP_CTS_PROT |
3474 BSS_CHANGED_HT))) {
3475 rc = mwl8k_cmd_set_aid(hw, vif, ap_legacy_rates);
3059 if (rc) 3476 if (rc)
3060 goto out; 3477 goto out;
3478 }
3061 3479
3480 if (vif->bss_conf.assoc &&
3481 (changed & (BSS_CHANGED_ASSOC | BSS_CHANGED_BEACON_INT))) {
3062 /* 3482 /*
3063 * Finalize the join. Tell rx handler to process 3483 * Finalize the join. Tell rx handler to process
3064 * next beacon from our BSSID. 3484 * next beacon from our BSSID.
3065 */ 3485 */
3066 memcpy(priv->capture_bssid, mwl8k_vif->bssid, ETH_ALEN); 3486 memcpy(priv->capture_bssid, vif->bss_conf.bssid, ETH_ALEN);
3067 priv->capture_beacon = true; 3487 priv->capture_beacon = true;
3068 } else {
3069 rc = mwl8k_cmd_update_sta_db(hw, vif, MWL8K_STA_DB_DEL_ENTRY);
3070 memset(&mwl8k_vif->bss_info, 0,
3071 sizeof(struct ieee80211_bss_conf));
3072 memset(mwl8k_vif->bssid, 0, ETH_ALEN);
3073 } 3488 }
3074 3489
3075out: 3490out:
3076 mwl8k_fw_unlock(hw); 3491 mwl8k_fw_unlock(hw);
3077} 3492}
3078 3493
3494static void
3495mwl8k_bss_info_changed_ap(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
3496 struct ieee80211_bss_conf *info, u32 changed)
3497{
3498 int rc;
3499
3500 if (mwl8k_fw_lock(hw))
3501 return;
3502
3503 if (changed & BSS_CHANGED_ERP_PREAMBLE) {
3504 rc = mwl8k_set_radio_preamble(hw,
3505 vif->bss_conf.use_short_preamble);
3506 if (rc)
3507 goto out;
3508 }
3509
3510 if (changed & BSS_CHANGED_BASIC_RATES) {
3511 int idx;
3512 int rate;
3513
3514 /*
3515 * Use lowest supported basic rate for multicasts
3516 * and management frames (such as probe responses --
3517 * beacons will always go out at 1 Mb/s).
3518 */
3519 idx = ffs(vif->bss_conf.basic_rates);
3520 if (idx)
3521 idx--;
3522
3523 if (hw->conf.channel->band == IEEE80211_BAND_2GHZ)
3524 rate = mwl8k_rates_24[idx].hw_value;
3525 else
3526 rate = mwl8k_rates_50[idx].hw_value;
3527
3528 mwl8k_cmd_use_fixed_rate_ap(hw, rate, rate);
3529 }
3530
3531 if (changed & (BSS_CHANGED_BEACON_INT | BSS_CHANGED_BEACON)) {
3532 struct sk_buff *skb;
3533
3534 skb = ieee80211_beacon_get(hw, vif);
3535 if (skb != NULL) {
3536 mwl8k_cmd_set_beacon(hw, vif, skb->data, skb->len);
3537 kfree_skb(skb);
3538 }
3539 }
3540
3541 if (changed & BSS_CHANGED_BEACON_ENABLED)
3542 mwl8k_cmd_bss_start(hw, vif, info->enable_beacon);
3543
3544out:
3545 mwl8k_fw_unlock(hw);
3546}
3547
3548static void
3549mwl8k_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
3550 struct ieee80211_bss_conf *info, u32 changed)
3551{
3552 struct mwl8k_priv *priv = hw->priv;
3553
3554 if (!priv->ap_fw)
3555 mwl8k_bss_info_changed_sta(hw, vif, info, changed);
3556 else
3557 mwl8k_bss_info_changed_ap(hw, vif, info, changed);
3558}
3559
3079static u64 mwl8k_prepare_multicast(struct ieee80211_hw *hw, 3560static u64 mwl8k_prepare_multicast(struct ieee80211_hw *hw,
3080 int mc_count, struct dev_addr_list *mclist) 3561 int mc_count, struct dev_addr_list *mclist)
3081{ 3562{
@@ -3105,7 +3586,7 @@ mwl8k_configure_filter_sniffer(struct ieee80211_hw *hw,
3105 * operation, so refuse to enable sniffer mode if a STA 3586 * operation, so refuse to enable sniffer mode if a STA
3106 * interface is active. 3587 * interface is active.
3107 */ 3588 */
3108 if (priv->vif != NULL) { 3589 if (!list_empty(&priv->vif_list)) {
3109 if (net_ratelimit()) 3590 if (net_ratelimit())
3110 printk(KERN_INFO "%s: not enabling sniffer " 3591 printk(KERN_INFO "%s: not enabling sniffer "
3111 "mode because STA interface is active\n", 3592 "mode because STA interface is active\n",
@@ -3114,7 +3595,7 @@ mwl8k_configure_filter_sniffer(struct ieee80211_hw *hw,
3114 } 3595 }
3115 3596
3116 if (!priv->sniffer_enabled) { 3597 if (!priv->sniffer_enabled) {
3117 if (mwl8k_enable_sniffer(hw, 1)) 3598 if (mwl8k_cmd_enable_sniffer(hw, 1))
3118 return 0; 3599 return 0;
3119 priv->sniffer_enabled = true; 3600 priv->sniffer_enabled = true;
3120 } 3601 }
@@ -3126,6 +3607,14 @@ mwl8k_configure_filter_sniffer(struct ieee80211_hw *hw,
3126 return 1; 3607 return 1;
3127} 3608}
3128 3609
3610static struct mwl8k_vif *mwl8k_first_vif(struct mwl8k_priv *priv)
3611{
3612 if (!list_empty(&priv->vif_list))
3613 return list_entry(priv->vif_list.next, struct mwl8k_vif, list);
3614
3615 return NULL;
3616}
3617
3129static void mwl8k_configure_filter(struct ieee80211_hw *hw, 3618static void mwl8k_configure_filter(struct ieee80211_hw *hw,
3130 unsigned int changed_flags, 3619 unsigned int changed_flags,
3131 unsigned int *total_flags, 3620 unsigned int *total_flags,
@@ -3163,7 +3652,7 @@ static void mwl8k_configure_filter(struct ieee80211_hw *hw,
3163 } 3652 }
3164 3653
3165 if (priv->sniffer_enabled) { 3654 if (priv->sniffer_enabled) {
3166 mwl8k_enable_sniffer(hw, 0); 3655 mwl8k_cmd_enable_sniffer(hw, 0);
3167 priv->sniffer_enabled = false; 3656 priv->sniffer_enabled = false;
3168 } 3657 }
3169 3658
@@ -3174,7 +3663,8 @@ static void mwl8k_configure_filter(struct ieee80211_hw *hw,
3174 */ 3663 */
3175 mwl8k_cmd_set_pre_scan(hw); 3664 mwl8k_cmd_set_pre_scan(hw);
3176 } else { 3665 } else {
3177 u8 *bssid; 3666 struct mwl8k_vif *mwl8k_vif;
3667 const u8 *bssid;
3178 3668
3179 /* 3669 /*
3180 * Enable the BSS filter. 3670 * Enable the BSS filter.
@@ -3184,9 +3674,11 @@ static void mwl8k_configure_filter(struct ieee80211_hw *hw,
3184 * (where the OUI part needs to be nonzero for 3674 * (where the OUI part needs to be nonzero for
3185 * the BSSID to be accepted by POST_SCAN). 3675 * the BSSID to be accepted by POST_SCAN).
3186 */ 3676 */
3187 bssid = "\x01\x00\x00\x00\x00\x00"; 3677 mwl8k_vif = mwl8k_first_vif(priv);
3188 if (priv->vif != NULL) 3678 if (mwl8k_vif != NULL)
3189 bssid = MWL8K_VIF(priv->vif)->bssid; 3679 bssid = mwl8k_vif->vif->bss_conf.bssid;
3680 else
3681 bssid = "\x01\x00\x00\x00\x00\x00";
3190 3682
3191 mwl8k_cmd_set_post_scan(hw, bssid); 3683 mwl8k_cmd_set_post_scan(hw, bssid);
3192 } 3684 }
@@ -3213,7 +3705,93 @@ static void mwl8k_configure_filter(struct ieee80211_hw *hw,
3213 3705
3214static int mwl8k_set_rts_threshold(struct ieee80211_hw *hw, u32 value) 3706static int mwl8k_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
3215{ 3707{
3216 return mwl8k_rts_threshold(hw, MWL8K_CMD_SET, value); 3708 return mwl8k_cmd_set_rts_threshold(hw, value);
3709}
3710
3711struct mwl8k_sta_notify_item
3712{
3713 struct list_head list;
3714 struct ieee80211_vif *vif;
3715 enum sta_notify_cmd cmd;
3716 struct ieee80211_sta sta;
3717};
3718
3719static void
3720mwl8k_do_sta_notify(struct ieee80211_hw *hw, struct mwl8k_sta_notify_item *s)
3721{
3722 struct mwl8k_priv *priv = hw->priv;
3723
3724 /*
3725 * STA firmware uses UPDATE_STADB, AP firmware uses SET_NEW_STN.
3726 */
3727 if (!priv->ap_fw && s->cmd == STA_NOTIFY_ADD) {
3728 int rc;
3729
3730 rc = mwl8k_cmd_update_stadb_add(hw, s->vif, &s->sta);
3731 if (rc >= 0) {
3732 struct ieee80211_sta *sta;
3733
3734 rcu_read_lock();
3735 sta = ieee80211_find_sta(s->vif, s->sta.addr);
3736 if (sta != NULL)
3737 MWL8K_STA(sta)->peer_id = rc;
3738 rcu_read_unlock();
3739 }
3740 } else if (!priv->ap_fw && s->cmd == STA_NOTIFY_REMOVE) {
3741 mwl8k_cmd_update_stadb_del(hw, s->vif, s->sta.addr);
3742 } else if (priv->ap_fw && s->cmd == STA_NOTIFY_ADD) {
3743 mwl8k_cmd_set_new_stn_add(hw, s->vif, &s->sta);
3744 } else if (priv->ap_fw && s->cmd == STA_NOTIFY_REMOVE) {
3745 mwl8k_cmd_set_new_stn_del(hw, s->vif, s->sta.addr);
3746 }
3747}
3748
3749static void mwl8k_sta_notify_worker(struct work_struct *work)
3750{
3751 struct mwl8k_priv *priv =
3752 container_of(work, struct mwl8k_priv, sta_notify_worker);
3753 struct ieee80211_hw *hw = priv->hw;
3754
3755 spin_lock_bh(&priv->sta_notify_list_lock);
3756 while (!list_empty(&priv->sta_notify_list)) {
3757 struct mwl8k_sta_notify_item *s;
3758
3759 s = list_entry(priv->sta_notify_list.next,
3760 struct mwl8k_sta_notify_item, list);
3761 list_del(&s->list);
3762
3763 spin_unlock_bh(&priv->sta_notify_list_lock);
3764
3765 mwl8k_do_sta_notify(hw, s);
3766 kfree(s);
3767
3768 spin_lock_bh(&priv->sta_notify_list_lock);
3769 }
3770 spin_unlock_bh(&priv->sta_notify_list_lock);
3771}
3772
3773static void
3774mwl8k_sta_notify(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
3775 enum sta_notify_cmd cmd, struct ieee80211_sta *sta)
3776{
3777 struct mwl8k_priv *priv = hw->priv;
3778 struct mwl8k_sta_notify_item *s;
3779
3780 if (cmd != STA_NOTIFY_ADD && cmd != STA_NOTIFY_REMOVE)
3781 return;
3782
3783 s = kmalloc(sizeof(*s), GFP_ATOMIC);
3784 if (s != NULL) {
3785 s->vif = vif;
3786 s->cmd = cmd;
3787 s->sta = *sta;
3788
3789 spin_lock(&priv->sta_notify_list_lock);
3790 list_add_tail(&s->list, &priv->sta_notify_list);
3791 spin_unlock(&priv->sta_notify_list_lock);
3792
3793 ieee80211_queue_work(hw, &priv->sta_notify_worker);
3794 }
3217} 3795}
3218 3796
3219static int mwl8k_conf_tx(struct ieee80211_hw *hw, u16 queue, 3797static int mwl8k_conf_tx(struct ieee80211_hw *hw, u16 queue,
@@ -3225,14 +3803,14 @@ static int mwl8k_conf_tx(struct ieee80211_hw *hw, u16 queue,
3225 rc = mwl8k_fw_lock(hw); 3803 rc = mwl8k_fw_lock(hw);
3226 if (!rc) { 3804 if (!rc) {
3227 if (!priv->wmm_enabled) 3805 if (!priv->wmm_enabled)
3228 rc = mwl8k_set_wmm(hw, 1); 3806 rc = mwl8k_cmd_set_wmm_mode(hw, 1);
3229 3807
3230 if (!rc) 3808 if (!rc)
3231 rc = mwl8k_set_edca_params(hw, queue, 3809 rc = mwl8k_cmd_set_edca_params(hw, queue,
3232 params->cw_min, 3810 params->cw_min,
3233 params->cw_max, 3811 params->cw_max,
3234 params->aifs, 3812 params->aifs,
3235 params->txop); 3813 params->txop);
3236 3814
3237 mwl8k_fw_unlock(hw); 3815 mwl8k_fw_unlock(hw);
3238 } 3816 }
@@ -3261,7 +3839,23 @@ static int mwl8k_get_tx_stats(struct ieee80211_hw *hw,
3261static int mwl8k_get_stats(struct ieee80211_hw *hw, 3839static int mwl8k_get_stats(struct ieee80211_hw *hw,
3262 struct ieee80211_low_level_stats *stats) 3840 struct ieee80211_low_level_stats *stats)
3263{ 3841{
3264 return mwl8k_cmd_802_11_get_stat(hw, stats); 3842 return mwl8k_cmd_get_stat(hw, stats);
3843}
3844
3845static int
3846mwl8k_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
3847 enum ieee80211_ampdu_mlme_action action,
3848 struct ieee80211_sta *sta, u16 tid, u16 *ssn)
3849{
3850 switch (action) {
3851 case IEEE80211_AMPDU_RX_START:
3852 case IEEE80211_AMPDU_RX_STOP:
3853 if (!(hw->flags & IEEE80211_HW_AMPDU_AGGREGATION))
3854 return -ENOTSUPP;
3855 return 0;
3856 default:
3857 return -ENOTSUPP;
3858 }
3265} 3859}
3266 3860
3267static const struct ieee80211_ops mwl8k_ops = { 3861static const struct ieee80211_ops mwl8k_ops = {
@@ -3275,67 +3869,68 @@ static const struct ieee80211_ops mwl8k_ops = {
3275 .prepare_multicast = mwl8k_prepare_multicast, 3869 .prepare_multicast = mwl8k_prepare_multicast,
3276 .configure_filter = mwl8k_configure_filter, 3870 .configure_filter = mwl8k_configure_filter,
3277 .set_rts_threshold = mwl8k_set_rts_threshold, 3871 .set_rts_threshold = mwl8k_set_rts_threshold,
3872 .sta_notify = mwl8k_sta_notify,
3278 .conf_tx = mwl8k_conf_tx, 3873 .conf_tx = mwl8k_conf_tx,
3279 .get_tx_stats = mwl8k_get_tx_stats, 3874 .get_tx_stats = mwl8k_get_tx_stats,
3280 .get_stats = mwl8k_get_stats, 3875 .get_stats = mwl8k_get_stats,
3876 .ampdu_action = mwl8k_ampdu_action,
3281}; 3877};
3282 3878
3283static void mwl8k_tx_reclaim_handler(unsigned long data)
3284{
3285 int i;
3286 struct ieee80211_hw *hw = (struct ieee80211_hw *) data;
3287 struct mwl8k_priv *priv = hw->priv;
3288
3289 spin_lock_bh(&priv->tx_lock);
3290 for (i = 0; i < MWL8K_TX_QUEUES; i++)
3291 mwl8k_txq_reclaim(hw, i, 0);
3292
3293 if (priv->tx_wait != NULL && !priv->pending_tx_pkts) {
3294 complete(priv->tx_wait);
3295 priv->tx_wait = NULL;
3296 }
3297 spin_unlock_bh(&priv->tx_lock);
3298}
3299
3300static void mwl8k_finalize_join_worker(struct work_struct *work) 3879static void mwl8k_finalize_join_worker(struct work_struct *work)
3301{ 3880{
3302 struct mwl8k_priv *priv = 3881 struct mwl8k_priv *priv =
3303 container_of(work, struct mwl8k_priv, finalize_join_worker); 3882 container_of(work, struct mwl8k_priv, finalize_join_worker);
3304 struct sk_buff *skb = priv->beacon_skb; 3883 struct sk_buff *skb = priv->beacon_skb;
3305 u8 dtim = MWL8K_VIF(priv->vif)->bss_info.dtim_period; 3884 struct mwl8k_vif *mwl8k_vif;
3306 3885
3307 mwl8k_finalize_join(priv->hw, skb->data, skb->len, dtim); 3886 mwl8k_vif = mwl8k_first_vif(priv);
3308 dev_kfree_skb(skb); 3887 if (mwl8k_vif != NULL)
3888 mwl8k_cmd_finalize_join(priv->hw, skb->data, skb->len,
3889 mwl8k_vif->vif->bss_conf.dtim_period);
3309 3890
3891 dev_kfree_skb(skb);
3310 priv->beacon_skb = NULL; 3892 priv->beacon_skb = NULL;
3311} 3893}
3312 3894
3313enum { 3895enum {
3314 MWL8687 = 0, 3896 MWL8363 = 0,
3897 MWL8687,
3315 MWL8366, 3898 MWL8366,
3316}; 3899};
3317 3900
3318static struct mwl8k_device_info mwl8k_info_tbl[] __devinitdata = { 3901static struct mwl8k_device_info mwl8k_info_tbl[] __devinitdata = {
3319 { 3902 [MWL8363] = {
3903 .part_name = "88w8363",
3904 .helper_image = "mwl8k/helper_8363.fw",
3905 .fw_image = "mwl8k/fmimage_8363.fw",
3906 },
3907 [MWL8687] = {
3320 .part_name = "88w8687", 3908 .part_name = "88w8687",
3321 .helper_image = "mwl8k/helper_8687.fw", 3909 .helper_image = "mwl8k/helper_8687.fw",
3322 .fw_image = "mwl8k/fmimage_8687.fw", 3910 .fw_image = "mwl8k/fmimage_8687.fw",
3323 .rxd_ops = &rxd_8687_ops,
3324 .modes = BIT(NL80211_IFTYPE_STATION),
3325 }, 3911 },
3326 { 3912 [MWL8366] = {
3327 .part_name = "88w8366", 3913 .part_name = "88w8366",
3328 .helper_image = "mwl8k/helper_8366.fw", 3914 .helper_image = "mwl8k/helper_8366.fw",
3329 .fw_image = "mwl8k/fmimage_8366.fw", 3915 .fw_image = "mwl8k/fmimage_8366.fw",
3330 .rxd_ops = &rxd_8366_ops, 3916 .ap_rxd_ops = &rxd_8366_ap_ops,
3331 .modes = 0,
3332 }, 3917 },
3333}; 3918};
3334 3919
3920MODULE_FIRMWARE("mwl8k/helper_8363.fw");
3921MODULE_FIRMWARE("mwl8k/fmimage_8363.fw");
3922MODULE_FIRMWARE("mwl8k/helper_8687.fw");
3923MODULE_FIRMWARE("mwl8k/fmimage_8687.fw");
3924MODULE_FIRMWARE("mwl8k/helper_8366.fw");
3925MODULE_FIRMWARE("mwl8k/fmimage_8366.fw");
3926
3335static DEFINE_PCI_DEVICE_TABLE(mwl8k_pci_id_table) = { 3927static DEFINE_PCI_DEVICE_TABLE(mwl8k_pci_id_table) = {
3928 { PCI_VDEVICE(MARVELL, 0x2a0c), .driver_data = MWL8363, },
3929 { PCI_VDEVICE(MARVELL, 0x2a24), .driver_data = MWL8363, },
3336 { PCI_VDEVICE(MARVELL, 0x2a2b), .driver_data = MWL8687, }, 3930 { PCI_VDEVICE(MARVELL, 0x2a2b), .driver_data = MWL8687, },
3337 { PCI_VDEVICE(MARVELL, 0x2a30), .driver_data = MWL8687, }, 3931 { PCI_VDEVICE(MARVELL, 0x2a30), .driver_data = MWL8687, },
3338 { PCI_VDEVICE(MARVELL, 0x2a40), .driver_data = MWL8366, }, 3932 { PCI_VDEVICE(MARVELL, 0x2a40), .driver_data = MWL8366, },
3933 { PCI_VDEVICE(MARVELL, 0x2a43), .driver_data = MWL8366, },
3339 { }, 3934 { },
3340}; 3935};
3341MODULE_DEVICE_TABLE(pci, mwl8k_pci_id_table); 3936MODULE_DEVICE_TABLE(pci, mwl8k_pci_id_table);
@@ -3354,6 +3949,7 @@ static int __devinit mwl8k_probe(struct pci_dev *pdev,
3354 printed_version = 1; 3949 printed_version = 1;
3355 } 3950 }
3356 3951
3952
3357 rc = pci_enable_device(pdev); 3953 rc = pci_enable_device(pdev);
3358 if (rc) { 3954 if (rc) {
3359 printk(KERN_ERR "%s: Cannot enable new PCI device\n", 3955 printk(KERN_ERR "%s: Cannot enable new PCI device\n",
@@ -3370,6 +3966,7 @@ static int __devinit mwl8k_probe(struct pci_dev *pdev,
3370 3966
3371 pci_set_master(pdev); 3967 pci_set_master(pdev);
3372 3968
3969
3373 hw = ieee80211_alloc_hw(sizeof(*priv), &mwl8k_ops); 3970 hw = ieee80211_alloc_hw(sizeof(*priv), &mwl8k_ops);
3374 if (hw == NULL) { 3971 if (hw == NULL) {
3375 printk(KERN_ERR "%s: ieee80211 alloc failed\n", MWL8K_NAME); 3972 printk(KERN_ERR "%s: ieee80211 alloc failed\n", MWL8K_NAME);
@@ -3377,17 +3974,14 @@ static int __devinit mwl8k_probe(struct pci_dev *pdev,
3377 goto err_free_reg; 3974 goto err_free_reg;
3378 } 3975 }
3379 3976
3977 SET_IEEE80211_DEV(hw, &pdev->dev);
3978 pci_set_drvdata(pdev, hw);
3979
3380 priv = hw->priv; 3980 priv = hw->priv;
3381 priv->hw = hw; 3981 priv->hw = hw;
3382 priv->pdev = pdev; 3982 priv->pdev = pdev;
3383 priv->device_info = &mwl8k_info_tbl[id->driver_data]; 3983 priv->device_info = &mwl8k_info_tbl[id->driver_data];
3384 priv->rxd_ops = priv->device_info->rxd_ops;
3385 priv->sniffer_enabled = false;
3386 priv->wmm_enabled = false;
3387 priv->pending_tx_pkts = 0;
3388 3984
3389 SET_IEEE80211_DEV(hw, &pdev->dev);
3390 pci_set_drvdata(pdev, hw);
3391 3985
3392 priv->sram = pci_iomap(pdev, 0, 0x10000); 3986 priv->sram = pci_iomap(pdev, 0, 0x10000);
3393 if (priv->sram == NULL) { 3987 if (priv->sram == NULL) {
@@ -3410,16 +4004,46 @@ static int __devinit mwl8k_probe(struct pci_dev *pdev,
3410 } 4004 }
3411 } 4005 }
3412 4006
3413 memcpy(priv->channels, mwl8k_channels, sizeof(mwl8k_channels));
3414 priv->band.band = IEEE80211_BAND_2GHZ;
3415 priv->band.channels = priv->channels;
3416 priv->band.n_channels = ARRAY_SIZE(mwl8k_channels);
3417 priv->band.bitrates = priv->rates;
3418 priv->band.n_bitrates = ARRAY_SIZE(mwl8k_rates);
3419 hw->wiphy->bands[IEEE80211_BAND_2GHZ] = &priv->band;
3420 4007
3421 BUILD_BUG_ON(sizeof(priv->rates) != sizeof(mwl8k_rates)); 4008 /* Reset firmware and hardware */
3422 memcpy(priv->rates, mwl8k_rates, sizeof(mwl8k_rates)); 4009 mwl8k_hw_reset(priv);
4010
4011 /* Ask userland hotplug daemon for the device firmware */
4012 rc = mwl8k_request_firmware(priv);
4013 if (rc) {
4014 printk(KERN_ERR "%s: Firmware files not found\n",
4015 wiphy_name(hw->wiphy));
4016 goto err_stop_firmware;
4017 }
4018
4019 /* Load firmware into hardware */
4020 rc = mwl8k_load_firmware(hw);
4021 if (rc) {
4022 printk(KERN_ERR "%s: Cannot start firmware\n",
4023 wiphy_name(hw->wiphy));
4024 goto err_stop_firmware;
4025 }
4026
4027 /* Reclaim memory once firmware is successfully loaded */
4028 mwl8k_release_firmware(priv);
4029
4030
4031 if (priv->ap_fw) {
4032 priv->rxd_ops = priv->device_info->ap_rxd_ops;
4033 if (priv->rxd_ops == NULL) {
4034 printk(KERN_ERR "%s: Driver does not have AP "
4035 "firmware image support for this hardware\n",
4036 wiphy_name(hw->wiphy));
4037 goto err_stop_firmware;
4038 }
4039 } else {
4040 priv->rxd_ops = &rxd_sta_ops;
4041 }
4042
4043 priv->sniffer_enabled = false;
4044 priv->wmm_enabled = false;
4045 priv->pending_tx_pkts = 0;
4046
3423 4047
3424 /* 4048 /*
3425 * Extra headroom is the size of the required DMA header 4049 * Extra headroom is the size of the required DMA header
@@ -3432,33 +4056,40 @@ static int __devinit mwl8k_probe(struct pci_dev *pdev,
3432 4056
3433 hw->queues = MWL8K_TX_QUEUES; 4057 hw->queues = MWL8K_TX_QUEUES;
3434 4058
3435 hw->wiphy->interface_modes = priv->device_info->modes;
3436
3437 /* Set rssi and noise values to dBm */ 4059 /* Set rssi and noise values to dBm */
3438 hw->flags |= IEEE80211_HW_SIGNAL_DBM | IEEE80211_HW_NOISE_DBM; 4060 hw->flags |= IEEE80211_HW_SIGNAL_DBM | IEEE80211_HW_NOISE_DBM;
3439 hw->vif_data_size = sizeof(struct mwl8k_vif); 4061 hw->vif_data_size = sizeof(struct mwl8k_vif);
3440 priv->vif = NULL; 4062 hw->sta_data_size = sizeof(struct mwl8k_sta);
4063
4064 priv->macids_used = 0;
4065 INIT_LIST_HEAD(&priv->vif_list);
3441 4066
3442 /* Set default radio state and preamble */ 4067 /* Set default radio state and preamble */
3443 priv->radio_on = 0; 4068 priv->radio_on = 0;
3444 priv->radio_short_preamble = 0; 4069 priv->radio_short_preamble = 0;
3445 4070
4071 /* Station database handling */
4072 INIT_WORK(&priv->sta_notify_worker, mwl8k_sta_notify_worker);
4073 spin_lock_init(&priv->sta_notify_list_lock);
4074 INIT_LIST_HEAD(&priv->sta_notify_list);
4075
3446 /* Finalize join worker */ 4076 /* Finalize join worker */
3447 INIT_WORK(&priv->finalize_join_worker, mwl8k_finalize_join_worker); 4077 INIT_WORK(&priv->finalize_join_worker, mwl8k_finalize_join_worker);
3448 4078
3449 /* TX reclaim tasklet */ 4079 /* TX reclaim and RX tasklets. */
3450 tasklet_init(&priv->tx_reclaim_task, 4080 tasklet_init(&priv->poll_tx_task, mwl8k_tx_poll, (unsigned long)hw);
3451 mwl8k_tx_reclaim_handler, (unsigned long)hw); 4081 tasklet_disable(&priv->poll_tx_task);
3452 tasklet_disable(&priv->tx_reclaim_task); 4082 tasklet_init(&priv->poll_rx_task, mwl8k_rx_poll, (unsigned long)hw);
4083 tasklet_disable(&priv->poll_rx_task);
3453 4084
3454 /* Power management cookie */ 4085 /* Power management cookie */
3455 priv->cookie = pci_alloc_consistent(priv->pdev, 4, &priv->cookie_dma); 4086 priv->cookie = pci_alloc_consistent(priv->pdev, 4, &priv->cookie_dma);
3456 if (priv->cookie == NULL) 4087 if (priv->cookie == NULL)
3457 goto err_iounmap; 4088 goto err_stop_firmware;
3458 4089
3459 rc = mwl8k_rxq_init(hw, 0); 4090 rc = mwl8k_rxq_init(hw, 0);
3460 if (rc) 4091 if (rc)
3461 goto err_iounmap; 4092 goto err_free_cookie;
3462 rxq_refill(hw, 0, INT_MAX); 4093 rxq_refill(hw, 0, INT_MAX);
3463 4094
3464 mutex_init(&priv->fw_mutex); 4095 mutex_init(&priv->fw_mutex);
@@ -3478,7 +4109,8 @@ static int __devinit mwl8k_probe(struct pci_dev *pdev,
3478 4109
3479 iowrite32(0, priv->regs + MWL8K_HIU_A2H_INTERRUPT_STATUS); 4110 iowrite32(0, priv->regs + MWL8K_HIU_A2H_INTERRUPT_STATUS);
3480 iowrite32(0, priv->regs + MWL8K_HIU_A2H_INTERRUPT_MASK); 4111 iowrite32(0, priv->regs + MWL8K_HIU_A2H_INTERRUPT_MASK);
3481 iowrite32(0, priv->regs + MWL8K_HIU_A2H_INTERRUPT_CLEAR_SEL); 4112 iowrite32(MWL8K_A2H_INT_TX_DONE | MWL8K_A2H_INT_RX_READY,
4113 priv->regs + MWL8K_HIU_A2H_INTERRUPT_CLEAR_SEL);
3482 iowrite32(0xffffffff, priv->regs + MWL8K_HIU_A2H_INTERRUPT_STATUS_MASK); 4114 iowrite32(0xffffffff, priv->regs + MWL8K_HIU_A2H_INTERRUPT_STATUS_MASK);
3483 4115
3484 rc = request_irq(priv->pdev->irq, mwl8k_interrupt, 4116 rc = request_irq(priv->pdev->irq, mwl8k_interrupt,
@@ -3489,31 +4121,9 @@ static int __devinit mwl8k_probe(struct pci_dev *pdev,
3489 goto err_free_queues; 4121 goto err_free_queues;
3490 } 4122 }
3491 4123
3492 /* Reset firmware and hardware */
3493 mwl8k_hw_reset(priv);
3494
3495 /* Ask userland hotplug daemon for the device firmware */
3496 rc = mwl8k_request_firmware(priv);
3497 if (rc) {
3498 printk(KERN_ERR "%s: Firmware files not found\n",
3499 wiphy_name(hw->wiphy));
3500 goto err_free_irq;
3501 }
3502
3503 /* Load firmware into hardware */
3504 rc = mwl8k_load_firmware(hw);
3505 if (rc) {
3506 printk(KERN_ERR "%s: Cannot start firmware\n",
3507 wiphy_name(hw->wiphy));
3508 goto err_stop_firmware;
3509 }
3510
3511 /* Reclaim memory once firmware is successfully loaded */
3512 mwl8k_release_firmware(priv);
3513
3514 /* 4124 /*
3515 * Temporarily enable interrupts. Initial firmware host 4125 * Temporarily enable interrupts. Initial firmware host
3516 * commands use interrupts and avoids polling. Disable 4126 * commands use interrupts and avoid polling. Disable
3517 * interrupts when done. 4127 * interrupts when done.
3518 */ 4128 */
3519 iowrite32(MWL8K_A2H_EVENTS, priv->regs + MWL8K_HIU_A2H_INTERRUPT_MASK); 4129 iowrite32(MWL8K_A2H_EVENTS, priv->regs + MWL8K_HIU_A2H_INTERRUPT_MASK);
@@ -3529,22 +4139,29 @@ static int __devinit mwl8k_probe(struct pci_dev *pdev,
3529 if (rc) { 4139 if (rc) {
3530 printk(KERN_ERR "%s: Cannot initialise firmware\n", 4140 printk(KERN_ERR "%s: Cannot initialise firmware\n",
3531 wiphy_name(hw->wiphy)); 4141 wiphy_name(hw->wiphy));
3532 goto err_stop_firmware; 4142 goto err_free_irq;
3533 } 4143 }
3534 4144
4145 hw->wiphy->interface_modes = 0;
4146 if (priv->ap_macids_supported)
4147 hw->wiphy->interface_modes |= BIT(NL80211_IFTYPE_AP);
4148 if (priv->sta_macids_supported)
4149 hw->wiphy->interface_modes |= BIT(NL80211_IFTYPE_STATION);
4150
4151
3535 /* Turn radio off */ 4152 /* Turn radio off */
3536 rc = mwl8k_cmd_802_11_radio_disable(hw); 4153 rc = mwl8k_cmd_radio_disable(hw);
3537 if (rc) { 4154 if (rc) {
3538 printk(KERN_ERR "%s: Cannot disable\n", wiphy_name(hw->wiphy)); 4155 printk(KERN_ERR "%s: Cannot disable\n", wiphy_name(hw->wiphy));
3539 goto err_stop_firmware; 4156 goto err_free_irq;
3540 } 4157 }
3541 4158
3542 /* Clear MAC address */ 4159 /* Clear MAC address */
3543 rc = mwl8k_set_mac_addr(hw, "\x00\x00\x00\x00\x00\x00"); 4160 rc = mwl8k_cmd_set_mac_addr(hw, NULL, "\x00\x00\x00\x00\x00\x00");
3544 if (rc) { 4161 if (rc) {
3545 printk(KERN_ERR "%s: Cannot clear MAC address\n", 4162 printk(KERN_ERR "%s: Cannot clear MAC address\n",
3546 wiphy_name(hw->wiphy)); 4163 wiphy_name(hw->wiphy));
3547 goto err_stop_firmware; 4164 goto err_free_irq;
3548 } 4165 }
3549 4166
3550 /* Disable interrupts */ 4167 /* Disable interrupts */
@@ -3555,7 +4172,7 @@ static int __devinit mwl8k_probe(struct pci_dev *pdev,
3555 if (rc) { 4172 if (rc) {
3556 printk(KERN_ERR "%s: Cannot register device\n", 4173 printk(KERN_ERR "%s: Cannot register device\n",
3557 wiphy_name(hw->wiphy)); 4174 wiphy_name(hw->wiphy));
3558 goto err_stop_firmware; 4175 goto err_free_queues;
3559 } 4176 }
3560 4177
3561 printk(KERN_INFO "%s: %s v%d, %pM, %s firmware %u.%u.%u.%u\n", 4178 printk(KERN_INFO "%s: %s v%d, %pM, %s firmware %u.%u.%u.%u\n",
@@ -3567,10 +4184,6 @@ static int __devinit mwl8k_probe(struct pci_dev *pdev,
3567 4184
3568 return 0; 4185 return 0;
3569 4186
3570err_stop_firmware:
3571 mwl8k_hw_reset(priv);
3572 mwl8k_release_firmware(priv);
3573
3574err_free_irq: 4187err_free_irq:
3575 iowrite32(0, priv->regs + MWL8K_HIU_A2H_INTERRUPT_MASK); 4188 iowrite32(0, priv->regs + MWL8K_HIU_A2H_INTERRUPT_MASK);
3576 free_irq(priv->pdev->irq, hw); 4189 free_irq(priv->pdev->irq, hw);
@@ -3580,11 +4193,16 @@ err_free_queues:
3580 mwl8k_txq_deinit(hw, i); 4193 mwl8k_txq_deinit(hw, i);
3581 mwl8k_rxq_deinit(hw, 0); 4194 mwl8k_rxq_deinit(hw, 0);
3582 4195
3583err_iounmap: 4196err_free_cookie:
3584 if (priv->cookie != NULL) 4197 if (priv->cookie != NULL)
3585 pci_free_consistent(priv->pdev, 4, 4198 pci_free_consistent(priv->pdev, 4,
3586 priv->cookie, priv->cookie_dma); 4199 priv->cookie, priv->cookie_dma);
3587 4200
4201err_stop_firmware:
4202 mwl8k_hw_reset(priv);
4203 mwl8k_release_firmware(priv);
4204
4205err_iounmap:
3588 if (priv->regs != NULL) 4206 if (priv->regs != NULL)
3589 pci_iounmap(pdev, priv->regs); 4207 pci_iounmap(pdev, priv->regs);
3590 4208
@@ -3622,15 +4240,16 @@ static void __devexit mwl8k_remove(struct pci_dev *pdev)
3622 4240
3623 ieee80211_unregister_hw(hw); 4241 ieee80211_unregister_hw(hw);
3624 4242
3625 /* Remove tx reclaim tasklet */ 4243 /* Remove TX reclaim and RX tasklets. */
3626 tasklet_kill(&priv->tx_reclaim_task); 4244 tasklet_kill(&priv->poll_tx_task);
4245 tasklet_kill(&priv->poll_rx_task);
3627 4246
3628 /* Stop hardware */ 4247 /* Stop hardware */
3629 mwl8k_hw_reset(priv); 4248 mwl8k_hw_reset(priv);
3630 4249
3631 /* Return all skbs to mac80211 */ 4250 /* Return all skbs to mac80211 */
3632 for (i = 0; i < MWL8K_TX_QUEUES; i++) 4251 for (i = 0; i < MWL8K_TX_QUEUES; i++)
3633 mwl8k_txq_reclaim(hw, i, 1); 4252 mwl8k_txq_reclaim(hw, i, INT_MAX, 1);
3634 4253
3635 for (i = 0; i < MWL8K_TX_QUEUES; i++) 4254 for (i = 0; i < MWL8K_TX_QUEUES; i++)
3636 mwl8k_txq_deinit(hw, i); 4255 mwl8k_txq_deinit(hw, i);
diff --git a/drivers/net/wireless/orinoco/orinoco_nortel.c b/drivers/net/wireless/orinoco/orinoco_nortel.c
index c13a4c38341..075f446b313 100644
--- a/drivers/net/wireless/orinoco/orinoco_nortel.c
+++ b/drivers/net/wireless/orinoco/orinoco_nortel.c
@@ -274,7 +274,7 @@ static void __devexit orinoco_nortel_remove_one(struct pci_dev *pdev)
274 pci_disable_device(pdev); 274 pci_disable_device(pdev);
275} 275}
276 276
277static struct pci_device_id orinoco_nortel_id_table[] = { 277static DEFINE_PCI_DEVICE_TABLE(orinoco_nortel_id_table) = {
278 /* Nortel emobility PCI */ 278 /* Nortel emobility PCI */
279 {0x126c, 0x8030, PCI_ANY_ID, PCI_ANY_ID,}, 279 {0x126c, 0x8030, PCI_ANY_ID, PCI_ANY_ID,},
280 /* Symbol LA-4123 PCI */ 280 /* Symbol LA-4123 PCI */
diff --git a/drivers/net/wireless/orinoco/orinoco_pci.c b/drivers/net/wireless/orinoco/orinoco_pci.c
index fea7781948e..bda5317cc59 100644
--- a/drivers/net/wireless/orinoco/orinoco_pci.c
+++ b/drivers/net/wireless/orinoco/orinoco_pci.c
@@ -212,7 +212,7 @@ static void __devexit orinoco_pci_remove_one(struct pci_dev *pdev)
212 pci_disable_device(pdev); 212 pci_disable_device(pdev);
213} 213}
214 214
215static struct pci_device_id orinoco_pci_id_table[] = { 215static DEFINE_PCI_DEVICE_TABLE(orinoco_pci_id_table) = {
216 /* Intersil Prism 3 */ 216 /* Intersil Prism 3 */
217 {0x1260, 0x3872, PCI_ANY_ID, PCI_ANY_ID,}, 217 {0x1260, 0x3872, PCI_ANY_ID, PCI_ANY_ID,},
218 /* Intersil Prism 2.5 */ 218 /* Intersil Prism 2.5 */
diff --git a/drivers/net/wireless/orinoco/orinoco_plx.c b/drivers/net/wireless/orinoco/orinoco_plx.c
index 3f2942a1e4f..e0d5874ab42 100644
--- a/drivers/net/wireless/orinoco/orinoco_plx.c
+++ b/drivers/net/wireless/orinoco/orinoco_plx.c
@@ -310,7 +310,7 @@ static void __devexit orinoco_plx_remove_one(struct pci_dev *pdev)
310 pci_disable_device(pdev); 310 pci_disable_device(pdev);
311} 311}
312 312
313static struct pci_device_id orinoco_plx_id_table[] = { 313static DEFINE_PCI_DEVICE_TABLE(orinoco_plx_id_table) = {
314 {0x111a, 0x1023, PCI_ANY_ID, PCI_ANY_ID,}, /* Siemens SpeedStream SS1023 */ 314 {0x111a, 0x1023, PCI_ANY_ID, PCI_ANY_ID,}, /* Siemens SpeedStream SS1023 */
315 {0x1385, 0x4100, PCI_ANY_ID, PCI_ANY_ID,}, /* Netgear MA301 */ 315 {0x1385, 0x4100, PCI_ANY_ID, PCI_ANY_ID,}, /* Netgear MA301 */
316 {0x15e8, 0x0130, PCI_ANY_ID, PCI_ANY_ID,}, /* Correga - does this work? */ 316 {0x15e8, 0x0130, PCI_ANY_ID, PCI_ANY_ID,}, /* Correga - does this work? */
diff --git a/drivers/net/wireless/orinoco/orinoco_tmd.c b/drivers/net/wireless/orinoco/orinoco_tmd.c
index d3452548cc7..88cbc7902aa 100644
--- a/drivers/net/wireless/orinoco/orinoco_tmd.c
+++ b/drivers/net/wireless/orinoco/orinoco_tmd.c
@@ -203,7 +203,7 @@ static void __devexit orinoco_tmd_remove_one(struct pci_dev *pdev)
203 pci_disable_device(pdev); 203 pci_disable_device(pdev);
204} 204}
205 205
206static struct pci_device_id orinoco_tmd_id_table[] = { 206static DEFINE_PCI_DEVICE_TABLE(orinoco_tmd_id_table) = {
207 {0x15e8, 0x0131, PCI_ANY_ID, PCI_ANY_ID,}, /* NDC and OEMs, e.g. pheecom */ 207 {0x15e8, 0x0131, PCI_ANY_ID, PCI_ANY_ID,}, /* NDC and OEMs, e.g. pheecom */
208 {0,}, 208 {0,},
209}; 209};
diff --git a/drivers/net/wireless/p54/main.c b/drivers/net/wireless/p54/main.c
index 18012dbfb45..26428e4c9c6 100644
--- a/drivers/net/wireless/p54/main.c
+++ b/drivers/net/wireless/p54/main.c
@@ -216,7 +216,7 @@ static void p54_stop(struct ieee80211_hw *dev)
216} 216}
217 217
218static int p54_add_interface(struct ieee80211_hw *dev, 218static int p54_add_interface(struct ieee80211_hw *dev,
219 struct ieee80211_if_init_conf *conf) 219 struct ieee80211_vif *vif)
220{ 220{
221 struct p54_common *priv = dev->priv; 221 struct p54_common *priv = dev->priv;
222 222
@@ -226,28 +226,28 @@ static int p54_add_interface(struct ieee80211_hw *dev,
226 return -EOPNOTSUPP; 226 return -EOPNOTSUPP;
227 } 227 }
228 228
229 priv->vif = conf->vif; 229 priv->vif = vif;
230 230
231 switch (conf->type) { 231 switch (vif->type) {
232 case NL80211_IFTYPE_STATION: 232 case NL80211_IFTYPE_STATION:
233 case NL80211_IFTYPE_ADHOC: 233 case NL80211_IFTYPE_ADHOC:
234 case NL80211_IFTYPE_AP: 234 case NL80211_IFTYPE_AP:
235 case NL80211_IFTYPE_MESH_POINT: 235 case NL80211_IFTYPE_MESH_POINT:
236 priv->mode = conf->type; 236 priv->mode = vif->type;
237 break; 237 break;
238 default: 238 default:
239 mutex_unlock(&priv->conf_mutex); 239 mutex_unlock(&priv->conf_mutex);
240 return -EOPNOTSUPP; 240 return -EOPNOTSUPP;
241 } 241 }
242 242
243 memcpy(priv->mac_addr, conf->mac_addr, ETH_ALEN); 243 memcpy(priv->mac_addr, vif->addr, ETH_ALEN);
244 p54_setup_mac(priv); 244 p54_setup_mac(priv);
245 mutex_unlock(&priv->conf_mutex); 245 mutex_unlock(&priv->conf_mutex);
246 return 0; 246 return 0;
247} 247}
248 248
249static void p54_remove_interface(struct ieee80211_hw *dev, 249static void p54_remove_interface(struct ieee80211_hw *dev,
250 struct ieee80211_if_init_conf *conf) 250 struct ieee80211_vif *vif)
251{ 251{
252 struct p54_common *priv = dev->priv; 252 struct p54_common *priv = dev->priv;
253 253
diff --git a/drivers/net/wireless/p54/p54pci.c b/drivers/net/wireless/p54/p54pci.c
index a72f7c2577d..57c64659806 100644
--- a/drivers/net/wireless/p54/p54pci.c
+++ b/drivers/net/wireless/p54/p54pci.c
@@ -31,7 +31,7 @@ MODULE_LICENSE("GPL");
31MODULE_ALIAS("prism54pci"); 31MODULE_ALIAS("prism54pci");
32MODULE_FIRMWARE("isl3886pci"); 32MODULE_FIRMWARE("isl3886pci");
33 33
34static struct pci_device_id p54p_table[] __devinitdata = { 34static DEFINE_PCI_DEVICE_TABLE(p54p_table) = {
35 /* Intersil PRISM Duette/Prism GT Wireless LAN adapter */ 35 /* Intersil PRISM Duette/Prism GT Wireless LAN adapter */
36 { PCI_DEVICE(0x1260, 0x3890) }, 36 { PCI_DEVICE(0x1260, 0x3890) },
37 /* 3COM 3CRWE154G72 Wireless LAN adapter */ 37 /* 3COM 3CRWE154G72 Wireless LAN adapter */
diff --git a/drivers/net/wireless/prism54/islpci_hotplug.c b/drivers/net/wireless/prism54/islpci_hotplug.c
index e4f2bb7368f..dc14420a9ad 100644
--- a/drivers/net/wireless/prism54/islpci_hotplug.c
+++ b/drivers/net/wireless/prism54/islpci_hotplug.c
@@ -39,7 +39,7 @@ module_param(init_pcitm, int, 0);
39 * driver_data 39 * driver_data
40 * If you have an update for this please contact prism54-devel@prism54.org 40 * If you have an update for this please contact prism54-devel@prism54.org
41 * The latest list can be found at http://prism54.org/supported_cards.php */ 41 * The latest list can be found at http://prism54.org/supported_cards.php */
42static const struct pci_device_id prism54_id_tbl[] = { 42static DEFINE_PCI_DEVICE_TABLE(prism54_id_tbl) = {
43 /* Intersil PRISM Duette/Prism GT Wireless LAN adapter */ 43 /* Intersil PRISM Duette/Prism GT Wireless LAN adapter */
44 { 44 {
45 0x1260, 0x3890, 45 0x1260, 0x3890,
diff --git a/drivers/net/wireless/rndis_wlan.c b/drivers/net/wireless/rndis_wlan.c
index 2ecbedb26e1..305c106fdc1 100644
--- a/drivers/net/wireless/rndis_wlan.c
+++ b/drivers/net/wireless/rndis_wlan.c
@@ -2594,23 +2594,9 @@ end:
2594/* 2594/*
2595 * driver/device initialization 2595 * driver/device initialization
2596 */ 2596 */
2597static int bcm4320a_early_init(struct usbnet *usbdev) 2597static void rndis_copy_module_params(struct usbnet *usbdev)
2598{
2599 /* bcm4320a doesn't handle configuration parameters well. Try
2600 * set any and you get partially zeroed mac and broken device.
2601 */
2602
2603 return 0;
2604}
2605
2606static int bcm4320b_early_init(struct usbnet *usbdev)
2607{ 2598{
2608 struct rndis_wlan_private *priv = get_rndis_wlan_priv(usbdev); 2599 struct rndis_wlan_private *priv = get_rndis_wlan_priv(usbdev);
2609 char buf[8];
2610
2611 /* Early initialization settings, setting these won't have effect
2612 * if called after generic_rndis_bind().
2613 */
2614 2600
2615 priv->param_country[0] = modparam_country[0]; 2601 priv->param_country[0] = modparam_country[0];
2616 priv->param_country[1] = modparam_country[1]; 2602 priv->param_country[1] = modparam_country[1];
@@ -2652,6 +2638,32 @@ static int bcm4320b_early_init(struct usbnet *usbdev)
2652 priv->param_workaround_interval = 500; 2638 priv->param_workaround_interval = 500;
2653 else 2639 else
2654 priv->param_workaround_interval = modparam_workaround_interval; 2640 priv->param_workaround_interval = modparam_workaround_interval;
2641}
2642
2643static int bcm4320a_early_init(struct usbnet *usbdev)
2644{
2645 /* copy module parameters for bcm4320a so that iwconfig reports txpower
2646 * and workaround parameter is copied to private structure correctly.
2647 */
2648 rndis_copy_module_params(usbdev);
2649
2650 /* bcm4320a doesn't handle configuration parameters well. Try
2651 * set any and you get partially zeroed mac and broken device.
2652 */
2653
2654 return 0;
2655}
2656
2657static int bcm4320b_early_init(struct usbnet *usbdev)
2658{
2659 struct rndis_wlan_private *priv = get_rndis_wlan_priv(usbdev);
2660 char buf[8];
2661
2662 rndis_copy_module_params(usbdev);
2663
2664 /* Early initialization settings, setting these won't have effect
2665 * if called after generic_rndis_bind().
2666 */
2655 2667
2656 rndis_set_config_parameter_str(usbdev, "Country", priv->param_country); 2668 rndis_set_config_parameter_str(usbdev, "Country", priv->param_country);
2657 rndis_set_config_parameter_str(usbdev, "FrameBursting", 2669 rndis_set_config_parameter_str(usbdev, "FrameBursting",
diff --git a/drivers/net/wireless/rt2x00/Kconfig b/drivers/net/wireless/rt2x00/Kconfig
index bf60689aaab..3ca824a91ad 100644
--- a/drivers/net/wireless/rt2x00/Kconfig
+++ b/drivers/net/wireless/rt2x00/Kconfig
@@ -54,12 +54,12 @@ config RT61PCI
54 When compiled as a module, this driver will be called rt61pci. 54 When compiled as a module, this driver will be called rt61pci.
55 55
56config RT2800PCI_PCI 56config RT2800PCI_PCI
57 tristate 57 boolean
58 depends on PCI 58 depends on PCI
59 default y 59 default y
60 60
61config RT2800PCI_SOC 61config RT2800PCI_SOC
62 tristate 62 boolean
63 depends on RALINK_RT288X || RALINK_RT305X 63 depends on RALINK_RT288X || RALINK_RT305X
64 default y 64 default y
65 65
diff --git a/drivers/net/wireless/rt2x00/rt2400pci.c b/drivers/net/wireless/rt2x00/rt2400pci.c
index e7f46405a41..aa579eb8723 100644
--- a/drivers/net/wireless/rt2x00/rt2400pci.c
+++ b/drivers/net/wireless/rt2x00/rt2400pci.c
@@ -451,7 +451,7 @@ static void rt2400pci_config_channel(struct rt2x00_dev *rt2x00dev,
451 /* 451 /*
452 * RF2420 chipset don't need any additional actions. 452 * RF2420 chipset don't need any additional actions.
453 */ 453 */
454 if (rt2x00_rf(&rt2x00dev->chip, RF2420)) 454 if (rt2x00_rf(rt2x00dev, RF2420))
455 return; 455 return;
456 456
457 /* 457 /*
@@ -1343,8 +1343,7 @@ static int rt2400pci_init_eeprom(struct rt2x00_dev *rt2x00dev)
1343 rt2x00_set_chip_rf(rt2x00dev, value, reg); 1343 rt2x00_set_chip_rf(rt2x00dev, value, reg);
1344 rt2x00_print_chip(rt2x00dev); 1344 rt2x00_print_chip(rt2x00dev);
1345 1345
1346 if (!rt2x00_rf(&rt2x00dev->chip, RF2420) && 1346 if (!rt2x00_rf(rt2x00dev, RF2420) && !rt2x00_rf(rt2x00dev, RF2421)) {
1347 !rt2x00_rf(&rt2x00dev->chip, RF2421)) {
1348 ERROR(rt2x00dev, "Invalid RF chipset detected.\n"); 1347 ERROR(rt2x00dev, "Invalid RF chipset detected.\n");
1349 return -ENODEV; 1348 return -ENODEV;
1350 } 1349 }
@@ -1643,7 +1642,7 @@ static const struct rt2x00_ops rt2400pci_ops = {
1643/* 1642/*
1644 * RT2400pci module information. 1643 * RT2400pci module information.
1645 */ 1644 */
1646static struct pci_device_id rt2400pci_device_table[] = { 1645static DEFINE_PCI_DEVICE_TABLE(rt2400pci_device_table) = {
1647 { PCI_DEVICE(0x1814, 0x0101), PCI_DEVICE_DATA(&rt2400pci_ops) }, 1646 { PCI_DEVICE(0x1814, 0x0101), PCI_DEVICE_DATA(&rt2400pci_ops) },
1648 { 0, } 1647 { 0, }
1649}; 1648};
diff --git a/drivers/net/wireless/rt2x00/rt2500pci.c b/drivers/net/wireless/rt2x00/rt2500pci.c
index 408fcfc120f..77ee1df7933 100644
--- a/drivers/net/wireless/rt2x00/rt2500pci.c
+++ b/drivers/net/wireless/rt2x00/rt2500pci.c
@@ -440,8 +440,7 @@ static void rt2500pci_config_ant(struct rt2x00_dev *rt2x00dev,
440 /* 440 /*
441 * RT2525E and RT5222 need to flip TX I/Q 441 * RT2525E and RT5222 need to flip TX I/Q
442 */ 442 */
443 if (rt2x00_rf(&rt2x00dev->chip, RF2525E) || 443 if (rt2x00_rf(rt2x00dev, RF2525E) || rt2x00_rf(rt2x00dev, RF5222)) {
444 rt2x00_rf(&rt2x00dev->chip, RF5222)) {
445 rt2x00_set_field8(&r2, BBP_R2_TX_IQ_FLIP, 1); 444 rt2x00_set_field8(&r2, BBP_R2_TX_IQ_FLIP, 1);
446 rt2x00_set_field32(&reg, BBPCSR1_CCK_FLIP, 1); 445 rt2x00_set_field32(&reg, BBPCSR1_CCK_FLIP, 1);
447 rt2x00_set_field32(&reg, BBPCSR1_OFDM_FLIP, 1); 446 rt2x00_set_field32(&reg, BBPCSR1_OFDM_FLIP, 1);
@@ -449,7 +448,7 @@ static void rt2500pci_config_ant(struct rt2x00_dev *rt2x00dev,
449 /* 448 /*
450 * RT2525E does not need RX I/Q Flip. 449 * RT2525E does not need RX I/Q Flip.
451 */ 450 */
452 if (rt2x00_rf(&rt2x00dev->chip, RF2525E)) 451 if (rt2x00_rf(rt2x00dev, RF2525E))
453 rt2x00_set_field8(&r14, BBP_R14_RX_IQ_FLIP, 0); 452 rt2x00_set_field8(&r14, BBP_R14_RX_IQ_FLIP, 0);
454 } else { 453 } else {
455 rt2x00_set_field32(&reg, BBPCSR1_CCK_FLIP, 0); 454 rt2x00_set_field32(&reg, BBPCSR1_CCK_FLIP, 0);
@@ -475,14 +474,14 @@ static void rt2500pci_config_channel(struct rt2x00_dev *rt2x00dev,
475 * Switch on tuning bits. 474 * Switch on tuning bits.
476 * For RT2523 devices we do not need to update the R1 register. 475 * For RT2523 devices we do not need to update the R1 register.
477 */ 476 */
478 if (!rt2x00_rf(&rt2x00dev->chip, RF2523)) 477 if (!rt2x00_rf(rt2x00dev, RF2523))
479 rt2x00_set_field32(&rf->rf1, RF1_TUNER, 1); 478 rt2x00_set_field32(&rf->rf1, RF1_TUNER, 1);
480 rt2x00_set_field32(&rf->rf3, RF3_TUNER, 1); 479 rt2x00_set_field32(&rf->rf3, RF3_TUNER, 1);
481 480
482 /* 481 /*
483 * For RT2525 we should first set the channel to half band higher. 482 * For RT2525 we should first set the channel to half band higher.
484 */ 483 */
485 if (rt2x00_rf(&rt2x00dev->chip, RF2525)) { 484 if (rt2x00_rf(rt2x00dev, RF2525)) {
486 static const u32 vals[] = { 485 static const u32 vals[] = {
487 0x00080cbe, 0x00080d02, 0x00080d06, 0x00080d0a, 486 0x00080cbe, 0x00080d02, 0x00080d06, 0x00080d0a,
488 0x00080d0e, 0x00080d12, 0x00080d16, 0x00080d1a, 487 0x00080d0e, 0x00080d12, 0x00080d16, 0x00080d1a,
@@ -516,7 +515,7 @@ static void rt2500pci_config_channel(struct rt2x00_dev *rt2x00dev,
516 * Switch off tuning bits. 515 * Switch off tuning bits.
517 * For RT2523 devices we do not need to update the R1 register. 516 * For RT2523 devices we do not need to update the R1 register.
518 */ 517 */
519 if (!rt2x00_rf(&rt2x00dev->chip, RF2523)) { 518 if (!rt2x00_rf(rt2x00dev, RF2523)) {
520 rt2x00_set_field32(&rf->rf1, RF1_TUNER, 0); 519 rt2x00_set_field32(&rf->rf1, RF1_TUNER, 0);
521 rt2500pci_rf_write(rt2x00dev, 1, rf->rf1); 520 rt2500pci_rf_write(rt2x00dev, 1, rf->rf1);
522 } 521 }
@@ -640,7 +639,7 @@ static void rt2500pci_link_tuner(struct rt2x00_dev *rt2x00dev,
640 * up to version C the link tuning should halt after 20 639 * up to version C the link tuning should halt after 20
641 * seconds while being associated. 640 * seconds while being associated.
642 */ 641 */
643 if (rt2x00_rev(&rt2x00dev->chip) < RT2560_VERSION_D && 642 if (rt2x00_rev(rt2x00dev) < RT2560_VERSION_D &&
644 rt2x00dev->intf_associated && count > 20) 643 rt2x00dev->intf_associated && count > 20)
645 return; 644 return;
646 645
@@ -650,7 +649,7 @@ static void rt2500pci_link_tuner(struct rt2x00_dev *rt2x00dev,
650 * should go straight to dynamic CCA tuning when they 649 * should go straight to dynamic CCA tuning when they
651 * are not associated. 650 * are not associated.
652 */ 651 */
653 if (rt2x00_rev(&rt2x00dev->chip) < RT2560_VERSION_D || 652 if (rt2x00_rev(rt2x00dev) < RT2560_VERSION_D ||
654 !rt2x00dev->intf_associated) 653 !rt2x00dev->intf_associated)
655 goto dynamic_cca_tune; 654 goto dynamic_cca_tune;
656 655
@@ -1507,12 +1506,12 @@ static int rt2500pci_init_eeprom(struct rt2x00_dev *rt2x00dev)
1507 rt2x00_set_chip_rf(rt2x00dev, value, reg); 1506 rt2x00_set_chip_rf(rt2x00dev, value, reg);
1508 rt2x00_print_chip(rt2x00dev); 1507 rt2x00_print_chip(rt2x00dev);
1509 1508
1510 if (!rt2x00_rf(&rt2x00dev->chip, RF2522) && 1509 if (!rt2x00_rf(rt2x00dev, RF2522) &&
1511 !rt2x00_rf(&rt2x00dev->chip, RF2523) && 1510 !rt2x00_rf(rt2x00dev, RF2523) &&
1512 !rt2x00_rf(&rt2x00dev->chip, RF2524) && 1511 !rt2x00_rf(rt2x00dev, RF2524) &&
1513 !rt2x00_rf(&rt2x00dev->chip, RF2525) && 1512 !rt2x00_rf(rt2x00dev, RF2525) &&
1514 !rt2x00_rf(&rt2x00dev->chip, RF2525E) && 1513 !rt2x00_rf(rt2x00dev, RF2525E) &&
1515 !rt2x00_rf(&rt2x00dev->chip, RF5222)) { 1514 !rt2x00_rf(rt2x00dev, RF5222)) {
1516 ERROR(rt2x00dev, "Invalid RF chipset detected.\n"); 1515 ERROR(rt2x00dev, "Invalid RF chipset detected.\n");
1517 return -ENODEV; 1516 return -ENODEV;
1518 } 1517 }
@@ -1744,22 +1743,22 @@ static int rt2500pci_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
1744 spec->supported_bands = SUPPORT_BAND_2GHZ; 1743 spec->supported_bands = SUPPORT_BAND_2GHZ;
1745 spec->supported_rates = SUPPORT_RATE_CCK | SUPPORT_RATE_OFDM; 1744 spec->supported_rates = SUPPORT_RATE_CCK | SUPPORT_RATE_OFDM;
1746 1745
1747 if (rt2x00_rf(&rt2x00dev->chip, RF2522)) { 1746 if (rt2x00_rf(rt2x00dev, RF2522)) {
1748 spec->num_channels = ARRAY_SIZE(rf_vals_bg_2522); 1747 spec->num_channels = ARRAY_SIZE(rf_vals_bg_2522);
1749 spec->channels = rf_vals_bg_2522; 1748 spec->channels = rf_vals_bg_2522;
1750 } else if (rt2x00_rf(&rt2x00dev->chip, RF2523)) { 1749 } else if (rt2x00_rf(rt2x00dev, RF2523)) {
1751 spec->num_channels = ARRAY_SIZE(rf_vals_bg_2523); 1750 spec->num_channels = ARRAY_SIZE(rf_vals_bg_2523);
1752 spec->channels = rf_vals_bg_2523; 1751 spec->channels = rf_vals_bg_2523;
1753 } else if (rt2x00_rf(&rt2x00dev->chip, RF2524)) { 1752 } else if (rt2x00_rf(rt2x00dev, RF2524)) {
1754 spec->num_channels = ARRAY_SIZE(rf_vals_bg_2524); 1753 spec->num_channels = ARRAY_SIZE(rf_vals_bg_2524);
1755 spec->channels = rf_vals_bg_2524; 1754 spec->channels = rf_vals_bg_2524;
1756 } else if (rt2x00_rf(&rt2x00dev->chip, RF2525)) { 1755 } else if (rt2x00_rf(rt2x00dev, RF2525)) {
1757 spec->num_channels = ARRAY_SIZE(rf_vals_bg_2525); 1756 spec->num_channels = ARRAY_SIZE(rf_vals_bg_2525);
1758 spec->channels = rf_vals_bg_2525; 1757 spec->channels = rf_vals_bg_2525;
1759 } else if (rt2x00_rf(&rt2x00dev->chip, RF2525E)) { 1758 } else if (rt2x00_rf(rt2x00dev, RF2525E)) {
1760 spec->num_channels = ARRAY_SIZE(rf_vals_bg_2525e); 1759 spec->num_channels = ARRAY_SIZE(rf_vals_bg_2525e);
1761 spec->channels = rf_vals_bg_2525e; 1760 spec->channels = rf_vals_bg_2525e;
1762 } else if (rt2x00_rf(&rt2x00dev->chip, RF5222)) { 1761 } else if (rt2x00_rf(rt2x00dev, RF5222)) {
1763 spec->supported_bands |= SUPPORT_BAND_5GHZ; 1762 spec->supported_bands |= SUPPORT_BAND_5GHZ;
1764 spec->num_channels = ARRAY_SIZE(rf_vals_5222); 1763 spec->num_channels = ARRAY_SIZE(rf_vals_5222);
1765 spec->channels = rf_vals_5222; 1764 spec->channels = rf_vals_5222;
@@ -1941,7 +1940,7 @@ static const struct rt2x00_ops rt2500pci_ops = {
1941/* 1940/*
1942 * RT2500pci module information. 1941 * RT2500pci module information.
1943 */ 1942 */
1944static struct pci_device_id rt2500pci_device_table[] = { 1943static DEFINE_PCI_DEVICE_TABLE(rt2500pci_device_table) = {
1945 { PCI_DEVICE(0x1814, 0x0201), PCI_DEVICE_DATA(&rt2500pci_ops) }, 1944 { PCI_DEVICE(0x1814, 0x0201), PCI_DEVICE_DATA(&rt2500pci_ops) },
1946 { 0, } 1945 { 0, }
1947}; 1946};
diff --git a/drivers/net/wireless/rt2x00/rt2500usb.c b/drivers/net/wireless/rt2x00/rt2500usb.c
index 83f2592c59d..9e6f865c57f 100644
--- a/drivers/net/wireless/rt2x00/rt2500usb.c
+++ b/drivers/net/wireless/rt2x00/rt2500usb.c
@@ -565,8 +565,7 @@ static void rt2500usb_config_ant(struct rt2x00_dev *rt2x00dev,
565 /* 565 /*
566 * RT2525E and RT5222 need to flip TX I/Q 566 * RT2525E and RT5222 need to flip TX I/Q
567 */ 567 */
568 if (rt2x00_rf(&rt2x00dev->chip, RF2525E) || 568 if (rt2x00_rf(rt2x00dev, RF2525E) || rt2x00_rf(rt2x00dev, RF5222)) {
569 rt2x00_rf(&rt2x00dev->chip, RF5222)) {
570 rt2x00_set_field8(&r2, BBP_R2_TX_IQ_FLIP, 1); 569 rt2x00_set_field8(&r2, BBP_R2_TX_IQ_FLIP, 1);
571 rt2x00_set_field16(&csr5, PHY_CSR5_CCK_FLIP, 1); 570 rt2x00_set_field16(&csr5, PHY_CSR5_CCK_FLIP, 1);
572 rt2x00_set_field16(&csr6, PHY_CSR6_OFDM_FLIP, 1); 571 rt2x00_set_field16(&csr6, PHY_CSR6_OFDM_FLIP, 1);
@@ -574,7 +573,7 @@ static void rt2500usb_config_ant(struct rt2x00_dev *rt2x00dev,
574 /* 573 /*
575 * RT2525E does not need RX I/Q Flip. 574 * RT2525E does not need RX I/Q Flip.
576 */ 575 */
577 if (rt2x00_rf(&rt2x00dev->chip, RF2525E)) 576 if (rt2x00_rf(rt2x00dev, RF2525E))
578 rt2x00_set_field8(&r14, BBP_R14_RX_IQ_FLIP, 0); 577 rt2x00_set_field8(&r14, BBP_R14_RX_IQ_FLIP, 0);
579 } else { 578 } else {
580 rt2x00_set_field16(&csr5, PHY_CSR5_CCK_FLIP, 0); 579 rt2x00_set_field16(&csr5, PHY_CSR5_CCK_FLIP, 0);
@@ -598,7 +597,7 @@ static void rt2500usb_config_channel(struct rt2x00_dev *rt2x00dev,
598 /* 597 /*
599 * For RT2525E we should first set the channel to half band higher. 598 * For RT2525E we should first set the channel to half band higher.
600 */ 599 */
601 if (rt2x00_rf(&rt2x00dev->chip, RF2525E)) { 600 if (rt2x00_rf(rt2x00dev, RF2525E)) {
602 static const u32 vals[] = { 601 static const u32 vals[] = {
603 0x000008aa, 0x000008ae, 0x000008ae, 0x000008b2, 602 0x000008aa, 0x000008ae, 0x000008ae, 0x000008b2,
604 0x000008b2, 0x000008b6, 0x000008b6, 0x000008ba, 603 0x000008b2, 0x000008b6, 0x000008b6, 0x000008ba,
@@ -793,7 +792,7 @@ static int rt2500usb_init_registers(struct rt2x00_dev *rt2x00dev)
793 rt2x00_set_field16(&reg, MAC_CSR1_HOST_READY, 1); 792 rt2x00_set_field16(&reg, MAC_CSR1_HOST_READY, 1);
794 rt2500usb_register_write(rt2x00dev, MAC_CSR1, reg); 793 rt2500usb_register_write(rt2x00dev, MAC_CSR1, reg);
795 794
796 if (rt2x00_rev(&rt2x00dev->chip) >= RT2570_VERSION_C) { 795 if (rt2x00_rev(rt2x00dev) >= RT2570_VERSION_C) {
797 rt2500usb_register_read(rt2x00dev, PHY_CSR2, &reg); 796 rt2500usb_register_read(rt2x00dev, PHY_CSR2, &reg);
798 rt2x00_set_field16(&reg, PHY_CSR2_LNA, 0); 797 rt2x00_set_field16(&reg, PHY_CSR2_LNA, 0);
799 } else { 798 } else {
@@ -1411,19 +1410,18 @@ static int rt2500usb_init_eeprom(struct rt2x00_dev *rt2x00dev)
1411 rt2x00_set_chip(rt2x00dev, RT2570, value, reg); 1410 rt2x00_set_chip(rt2x00dev, RT2570, value, reg);
1412 rt2x00_print_chip(rt2x00dev); 1411 rt2x00_print_chip(rt2x00dev);
1413 1412
1414 if (!rt2x00_check_rev(&rt2x00dev->chip, 0x000ffff0, 0) || 1413 if (!rt2x00_check_rev(rt2x00dev, 0x000ffff0, 0) ||
1415 rt2x00_check_rev(&rt2x00dev->chip, 0x0000000f, 0)) { 1414 rt2x00_check_rev(rt2x00dev, 0x0000000f, 0)) {
1416
1417 ERROR(rt2x00dev, "Invalid RT chipset detected.\n"); 1415 ERROR(rt2x00dev, "Invalid RT chipset detected.\n");
1418 return -ENODEV; 1416 return -ENODEV;
1419 } 1417 }
1420 1418
1421 if (!rt2x00_rf(&rt2x00dev->chip, RF2522) && 1419 if (!rt2x00_rf(rt2x00dev, RF2522) &&
1422 !rt2x00_rf(&rt2x00dev->chip, RF2523) && 1420 !rt2x00_rf(rt2x00dev, RF2523) &&
1423 !rt2x00_rf(&rt2x00dev->chip, RF2524) && 1421 !rt2x00_rf(rt2x00dev, RF2524) &&
1424 !rt2x00_rf(&rt2x00dev->chip, RF2525) && 1422 !rt2x00_rf(rt2x00dev, RF2525) &&
1425 !rt2x00_rf(&rt2x00dev->chip, RF2525E) && 1423 !rt2x00_rf(rt2x00dev, RF2525E) &&
1426 !rt2x00_rf(&rt2x00dev->chip, RF5222)) { 1424 !rt2x00_rf(rt2x00dev, RF5222)) {
1427 ERROR(rt2x00dev, "Invalid RF chipset detected.\n"); 1425 ERROR(rt2x00dev, "Invalid RF chipset detected.\n");
1428 return -ENODEV; 1426 return -ENODEV;
1429 } 1427 }
@@ -1667,22 +1665,22 @@ static int rt2500usb_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
1667 spec->supported_bands = SUPPORT_BAND_2GHZ; 1665 spec->supported_bands = SUPPORT_BAND_2GHZ;
1668 spec->supported_rates = SUPPORT_RATE_CCK | SUPPORT_RATE_OFDM; 1666 spec->supported_rates = SUPPORT_RATE_CCK | SUPPORT_RATE_OFDM;
1669 1667
1670 if (rt2x00_rf(&rt2x00dev->chip, RF2522)) { 1668 if (rt2x00_rf(rt2x00dev, RF2522)) {
1671 spec->num_channels = ARRAY_SIZE(rf_vals_bg_2522); 1669 spec->num_channels = ARRAY_SIZE(rf_vals_bg_2522);
1672 spec->channels = rf_vals_bg_2522; 1670 spec->channels = rf_vals_bg_2522;
1673 } else if (rt2x00_rf(&rt2x00dev->chip, RF2523)) { 1671 } else if (rt2x00_rf(rt2x00dev, RF2523)) {
1674 spec->num_channels = ARRAY_SIZE(rf_vals_bg_2523); 1672 spec->num_channels = ARRAY_SIZE(rf_vals_bg_2523);
1675 spec->channels = rf_vals_bg_2523; 1673 spec->channels = rf_vals_bg_2523;
1676 } else if (rt2x00_rf(&rt2x00dev->chip, RF2524)) { 1674 } else if (rt2x00_rf(rt2x00dev, RF2524)) {
1677 spec->num_channels = ARRAY_SIZE(rf_vals_bg_2524); 1675 spec->num_channels = ARRAY_SIZE(rf_vals_bg_2524);
1678 spec->channels = rf_vals_bg_2524; 1676 spec->channels = rf_vals_bg_2524;
1679 } else if (rt2x00_rf(&rt2x00dev->chip, RF2525)) { 1677 } else if (rt2x00_rf(rt2x00dev, RF2525)) {
1680 spec->num_channels = ARRAY_SIZE(rf_vals_bg_2525); 1678 spec->num_channels = ARRAY_SIZE(rf_vals_bg_2525);
1681 spec->channels = rf_vals_bg_2525; 1679 spec->channels = rf_vals_bg_2525;
1682 } else if (rt2x00_rf(&rt2x00dev->chip, RF2525E)) { 1680 } else if (rt2x00_rf(rt2x00dev, RF2525E)) {
1683 spec->num_channels = ARRAY_SIZE(rf_vals_bg_2525e); 1681 spec->num_channels = ARRAY_SIZE(rf_vals_bg_2525e);
1684 spec->channels = rf_vals_bg_2525e; 1682 spec->channels = rf_vals_bg_2525e;
1685 } else if (rt2x00_rf(&rt2x00dev->chip, RF5222)) { 1683 } else if (rt2x00_rf(rt2x00dev, RF5222)) {
1686 spec->supported_bands |= SUPPORT_BAND_5GHZ; 1684 spec->supported_bands |= SUPPORT_BAND_5GHZ;
1687 spec->num_channels = ARRAY_SIZE(rf_vals_5222); 1685 spec->num_channels = ARRAY_SIZE(rf_vals_5222);
1688 spec->channels = rf_vals_5222; 1686 spec->channels = rf_vals_5222;
diff --git a/drivers/net/wireless/rt2x00/rt2800lib.c b/drivers/net/wireless/rt2x00/rt2800lib.c
index 9deae41cb78..529a37364eb 100644
--- a/drivers/net/wireless/rt2x00/rt2800lib.c
+++ b/drivers/net/wireless/rt2x00/rt2800lib.c
@@ -37,7 +37,7 @@
37#include <linux/module.h> 37#include <linux/module.h>
38 38
39#include "rt2x00.h" 39#include "rt2x00.h"
40#if defined(CONFIG_RT2800USB) || defined(CONFIG_RT2800USB_MODULE) 40#if defined(CONFIG_RT2X00_LIB_USB) || defined(CONFIG_RT2X00_LIB_USB_MODULE)
41#include "rt2x00usb.h" 41#include "rt2x00usb.h"
42#endif 42#endif
43#include "rt2800lib.h" 43#include "rt2800lib.h"
@@ -220,8 +220,7 @@ void rt2800_mcu_request(struct rt2x00_dev *rt2x00dev,
220 /* 220 /*
221 * RT2880 and RT3052 don't support MCU requests. 221 * RT2880 and RT3052 don't support MCU requests.
222 */ 222 */
223 if (rt2x00_rt(&rt2x00dev->chip, RT2880) || 223 if (rt2x00_rt(rt2x00dev, RT2880) || rt2x00_rt(rt2x00dev, RT3052))
224 rt2x00_rt(&rt2x00dev->chip, RT3052))
225 return; 224 return;
226 225
227 mutex_lock(&rt2x00dev->csr_mutex); 226 mutex_lock(&rt2x00dev->csr_mutex);
@@ -246,6 +245,25 @@ void rt2800_mcu_request(struct rt2x00_dev *rt2x00dev,
246} 245}
247EXPORT_SYMBOL_GPL(rt2800_mcu_request); 246EXPORT_SYMBOL_GPL(rt2800_mcu_request);
248 247
248int rt2800_wait_wpdma_ready(struct rt2x00_dev *rt2x00dev)
249{
250 unsigned int i;
251 u32 reg;
252
253 for (i = 0; i < REGISTER_BUSY_COUNT; i++) {
254 rt2800_register_read(rt2x00dev, WPDMA_GLO_CFG, &reg);
255 if (!rt2x00_get_field32(reg, WPDMA_GLO_CFG_TX_DMA_BUSY) &&
256 !rt2x00_get_field32(reg, WPDMA_GLO_CFG_RX_DMA_BUSY))
257 return 0;
258
259 msleep(1);
260 }
261
262 ERROR(rt2x00dev, "WPDMA TX/RX busy, aborting.\n");
263 return -EACCES;
264}
265EXPORT_SYMBOL_GPL(rt2800_wait_wpdma_ready);
266
249#ifdef CONFIG_RT2X00_LIB_DEBUGFS 267#ifdef CONFIG_RT2X00_LIB_DEBUGFS
250const struct rt2x00debug rt2800_rt2x00debug = { 268const struct rt2x00debug rt2800_rt2x00debug = {
251 .owner = THIS_MODULE, 269 .owner = THIS_MODULE,
@@ -348,7 +366,7 @@ static int rt2800_blink_set(struct led_classdev *led_cdev,
348 return 0; 366 return 0;
349} 367}
350 368
351void rt2800_init_led(struct rt2x00_dev *rt2x00dev, 369static void rt2800_init_led(struct rt2x00_dev *rt2x00dev,
352 struct rt2x00_led *led, enum led_type type) 370 struct rt2x00_led *led, enum led_type type)
353{ 371{
354 led->rt2x00dev = rt2x00dev; 372 led->rt2x00dev = rt2x00dev;
@@ -357,7 +375,6 @@ void rt2800_init_led(struct rt2x00_dev *rt2x00dev,
357 led->led_dev.blink_set = rt2800_blink_set; 375 led->led_dev.blink_set = rt2800_blink_set;
358 led->flags = LED_INITIALIZED; 376 led->flags = LED_INITIALIZED;
359} 377}
360EXPORT_SYMBOL_GPL(rt2800_init_led);
361#endif /* CONFIG_RT2X00_LIB_LEDS */ 378#endif /* CONFIG_RT2X00_LIB_LEDS */
362 379
363/* 380/*
@@ -806,12 +823,12 @@ static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev,
806 unsigned int tx_pin; 823 unsigned int tx_pin;
807 u8 bbp; 824 u8 bbp;
808 825
809 if ((rt2x00_rt(&rt2x00dev->chip, RT3070) || 826 if ((rt2x00_rt(rt2x00dev, RT3070) ||
810 rt2x00_rt(&rt2x00dev->chip, RT3090)) && 827 rt2x00_rt(rt2x00dev, RT3090)) &&
811 (rt2x00_rf(&rt2x00dev->chip, RF2020) || 828 (rt2x00_rf(rt2x00dev, RF2020) ||
812 rt2x00_rf(&rt2x00dev->chip, RF3020) || 829 rt2x00_rf(rt2x00dev, RF3020) ||
813 rt2x00_rf(&rt2x00dev->chip, RF3021) || 830 rt2x00_rf(rt2x00dev, RF3021) ||
814 rt2x00_rf(&rt2x00dev->chip, RF3022))) 831 rt2x00_rf(rt2x00dev, RF3022)))
815 rt2800_config_channel_rt3x(rt2x00dev, conf, rf, info); 832 rt2800_config_channel_rt3x(rt2x00dev, conf, rf, info);
816 else 833 else
817 rt2800_config_channel_rt2x(rt2x00dev, conf, rf, info); 834 rt2800_config_channel_rt2x(rt2x00dev, conf, rf, info);
@@ -878,7 +895,7 @@ static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev,
878 rt2x00_set_field8(&bbp, BBP3_HT40_PLUS, conf_is_ht40_plus(conf)); 895 rt2x00_set_field8(&bbp, BBP3_HT40_PLUS, conf_is_ht40_plus(conf));
879 rt2800_bbp_write(rt2x00dev, 3, bbp); 896 rt2800_bbp_write(rt2x00dev, 3, bbp);
880 897
881 if (rt2x00_rev(&rt2x00dev->chip) == RT2860C_VERSION) { 898 if (rt2x00_rev(rt2x00dev) == RT2860C_VERSION) {
882 if (conf_is_ht40(conf)) { 899 if (conf_is_ht40(conf)) {
883 rt2800_bbp_write(rt2x00dev, 69, 0x1a); 900 rt2800_bbp_write(rt2x00dev, 69, 0x1a);
884 rt2800_bbp_write(rt2x00dev, 70, 0x0a); 901 rt2800_bbp_write(rt2x00dev, 70, 0x0a);
@@ -1041,7 +1058,7 @@ static u8 rt2800_get_default_vgc(struct rt2x00_dev *rt2x00dev)
1041{ 1058{
1042 if (rt2x00dev->curr_band == IEEE80211_BAND_2GHZ) { 1059 if (rt2x00dev->curr_band == IEEE80211_BAND_2GHZ) {
1043 if (rt2x00_intf_is_usb(rt2x00dev) && 1060 if (rt2x00_intf_is_usb(rt2x00dev) &&
1044 rt2x00_rev(&rt2x00dev->chip) == RT3070_VERSION) 1061 rt2x00_rev(rt2x00dev) == RT3070_VERSION)
1045 return 0x1c + (2 * rt2x00dev->lna_gain); 1062 return 0x1c + (2 * rt2x00dev->lna_gain);
1046 else 1063 else
1047 return 0x2e + rt2x00dev->lna_gain; 1064 return 0x2e + rt2x00dev->lna_gain;
@@ -1072,7 +1089,7 @@ EXPORT_SYMBOL_GPL(rt2800_reset_tuner);
1072void rt2800_link_tuner(struct rt2x00_dev *rt2x00dev, struct link_qual *qual, 1089void rt2800_link_tuner(struct rt2x00_dev *rt2x00dev, struct link_qual *qual,
1073 const u32 count) 1090 const u32 count)
1074{ 1091{
1075 if (rt2x00_rev(&rt2x00dev->chip) == RT2860C_VERSION) 1092 if (rt2x00_rev(rt2x00dev) == RT2860C_VERSION)
1076 return; 1093 return;
1077 1094
1078 /* 1095 /*
@@ -1121,7 +1138,7 @@ int rt2800_init_registers(struct rt2x00_dev *rt2x00dev)
1121 1138
1122 if (rt2x00_intf_is_usb(rt2x00dev)) { 1139 if (rt2x00_intf_is_usb(rt2x00dev)) {
1123 rt2800_register_write(rt2x00dev, USB_DMA_CFG, 0x00000000); 1140 rt2800_register_write(rt2x00dev, USB_DMA_CFG, 0x00000000);
1124#if defined(CONFIG_RT2800USB) || defined(CONFIG_RT2800USB_MODULE) 1141#if defined(CONFIG_RT2X00_LIB_USB) || defined(CONFIG_RT2X00_LIB_USB_MODULE)
1125 rt2x00usb_vendor_request_sw(rt2x00dev, USB_DEVICE_MODE, 0, 1142 rt2x00usb_vendor_request_sw(rt2x00dev, USB_DEVICE_MODE, 0,
1126 USB_MODE_RESET, REGISTER_TIMEOUT); 1143 USB_MODE_RESET, REGISTER_TIMEOUT);
1127#endif 1144#endif
@@ -1158,7 +1175,7 @@ int rt2800_init_registers(struct rt2x00_dev *rt2x00dev)
1158 rt2800_register_write(rt2x00dev, BCN_TIME_CFG, reg); 1175 rt2800_register_write(rt2x00dev, BCN_TIME_CFG, reg);
1159 1176
1160 if (rt2x00_intf_is_usb(rt2x00dev) && 1177 if (rt2x00_intf_is_usb(rt2x00dev) &&
1161 rt2x00_rev(&rt2x00dev->chip) == RT3070_VERSION) { 1178 rt2x00_rev(rt2x00dev) == RT3070_VERSION) {
1162 rt2800_register_write(rt2x00dev, TX_SW_CFG0, 0x00000400); 1179 rt2800_register_write(rt2x00dev, TX_SW_CFG0, 0x00000400);
1163 rt2800_register_write(rt2x00dev, TX_SW_CFG1, 0x00000000); 1180 rt2800_register_write(rt2x00dev, TX_SW_CFG1, 0x00000000);
1164 rt2800_register_write(rt2x00dev, TX_SW_CFG2, 0x00000000); 1181 rt2800_register_write(rt2x00dev, TX_SW_CFG2, 0x00000000);
@@ -1185,8 +1202,8 @@ int rt2800_init_registers(struct rt2x00_dev *rt2x00dev)
1185 1202
1186 rt2800_register_read(rt2x00dev, MAX_LEN_CFG, &reg); 1203 rt2800_register_read(rt2x00dev, MAX_LEN_CFG, &reg);
1187 rt2x00_set_field32(&reg, MAX_LEN_CFG_MAX_MPDU, AGGREGATION_SIZE); 1204 rt2x00_set_field32(&reg, MAX_LEN_CFG_MAX_MPDU, AGGREGATION_SIZE);
1188 if (rt2x00_rev(&rt2x00dev->chip) >= RT2880E_VERSION && 1205 if (rt2x00_rev(rt2x00dev) >= RT2880E_VERSION &&
1189 rt2x00_rev(&rt2x00dev->chip) < RT3070_VERSION) 1206 rt2x00_rev(rt2x00dev) < RT3070_VERSION)
1190 rt2x00_set_field32(&reg, MAX_LEN_CFG_MAX_PSDU, 2); 1207 rt2x00_set_field32(&reg, MAX_LEN_CFG_MAX_PSDU, 2);
1191 else 1208 else
1192 rt2x00_set_field32(&reg, MAX_LEN_CFG_MAX_PSDU, 1); 1209 rt2x00_set_field32(&reg, MAX_LEN_CFG_MAX_PSDU, 1);
@@ -1465,22 +1482,22 @@ int rt2800_init_bbp(struct rt2x00_dev *rt2x00dev)
1465 rt2800_bbp_write(rt2x00dev, 103, 0x00); 1482 rt2800_bbp_write(rt2x00dev, 103, 0x00);
1466 rt2800_bbp_write(rt2x00dev, 105, 0x05); 1483 rt2800_bbp_write(rt2x00dev, 105, 0x05);
1467 1484
1468 if (rt2x00_rev(&rt2x00dev->chip) == RT2860C_VERSION) { 1485 if (rt2x00_rev(rt2x00dev) == RT2860C_VERSION) {
1469 rt2800_bbp_write(rt2x00dev, 69, 0x16); 1486 rt2800_bbp_write(rt2x00dev, 69, 0x16);
1470 rt2800_bbp_write(rt2x00dev, 73, 0x12); 1487 rt2800_bbp_write(rt2x00dev, 73, 0x12);
1471 } 1488 }
1472 1489
1473 if (rt2x00_rev(&rt2x00dev->chip) > RT2860D_VERSION) 1490 if (rt2x00_rev(rt2x00dev) > RT2860D_VERSION)
1474 rt2800_bbp_write(rt2x00dev, 84, 0x19); 1491 rt2800_bbp_write(rt2x00dev, 84, 0x19);
1475 1492
1476 if (rt2x00_intf_is_usb(rt2x00dev) && 1493 if (rt2x00_intf_is_usb(rt2x00dev) &&
1477 rt2x00_rev(&rt2x00dev->chip) == RT3070_VERSION) { 1494 rt2x00_rev(rt2x00dev) == RT3070_VERSION) {
1478 rt2800_bbp_write(rt2x00dev, 70, 0x0a); 1495 rt2800_bbp_write(rt2x00dev, 70, 0x0a);
1479 rt2800_bbp_write(rt2x00dev, 84, 0x99); 1496 rt2800_bbp_write(rt2x00dev, 84, 0x99);
1480 rt2800_bbp_write(rt2x00dev, 105, 0x05); 1497 rt2800_bbp_write(rt2x00dev, 105, 0x05);
1481 } 1498 }
1482 1499
1483 if (rt2x00_rt(&rt2x00dev->chip, RT3052)) { 1500 if (rt2x00_rt(rt2x00dev, RT3052)) {
1484 rt2800_bbp_write(rt2x00dev, 31, 0x08); 1501 rt2800_bbp_write(rt2x00dev, 31, 0x08);
1485 rt2800_bbp_write(rt2x00dev, 78, 0x0e); 1502 rt2800_bbp_write(rt2x00dev, 78, 0x0e);
1486 rt2800_bbp_write(rt2x00dev, 80, 0x08); 1503 rt2800_bbp_write(rt2x00dev, 80, 0x08);
@@ -1566,13 +1583,13 @@ int rt2800_init_rfcsr(struct rt2x00_dev *rt2x00dev)
1566 u8 bbp; 1583 u8 bbp;
1567 1584
1568 if (rt2x00_intf_is_usb(rt2x00dev) && 1585 if (rt2x00_intf_is_usb(rt2x00dev) &&
1569 rt2x00_rev(&rt2x00dev->chip) != RT3070_VERSION) 1586 rt2x00_rev(rt2x00dev) != RT3070_VERSION)
1570 return 0; 1587 return 0;
1571 1588
1572 if (rt2x00_intf_is_pci(rt2x00dev)) { 1589 if (rt2x00_intf_is_pci(rt2x00dev)) {
1573 if (!rt2x00_rf(&rt2x00dev->chip, RF3020) && 1590 if (!rt2x00_rf(rt2x00dev, RF3020) &&
1574 !rt2x00_rf(&rt2x00dev->chip, RF3021) && 1591 !rt2x00_rf(rt2x00dev, RF3021) &&
1575 !rt2x00_rf(&rt2x00dev->chip, RF3022)) 1592 !rt2x00_rf(rt2x00dev, RF3022))
1576 return 0; 1593 return 0;
1577 } 1594 }
1578 1595
@@ -1737,7 +1754,7 @@ int rt2800_validate_eeprom(struct rt2x00_dev *rt2x00dev)
1737 rt2x00_set_field16(&word, EEPROM_ANTENNA_RF_TYPE, RF2820); 1754 rt2x00_set_field16(&word, EEPROM_ANTENNA_RF_TYPE, RF2820);
1738 rt2x00_eeprom_write(rt2x00dev, EEPROM_ANTENNA, word); 1755 rt2x00_eeprom_write(rt2x00dev, EEPROM_ANTENNA, word);
1739 EEPROM(rt2x00dev, "Antenna: 0x%04x\n", word); 1756 EEPROM(rt2x00dev, "Antenna: 0x%04x\n", word);
1740 } else if (rt2x00_rev(&rt2x00dev->chip) < RT2883_VERSION) { 1757 } else if (rt2x00_rev(rt2x00dev) < RT2883_VERSION) {
1741 /* 1758 /*
1742 * There is a max of 2 RX streams for RT28x0 series 1759 * There is a max of 2 RX streams for RT28x0 series
1743 */ 1760 */
@@ -1839,17 +1856,15 @@ int rt2800_init_eeprom(struct rt2x00_dev *rt2x00dev)
1839 rt2x00_set_chip_rf(rt2x00dev, value, reg); 1856 rt2x00_set_chip_rf(rt2x00dev, value, reg);
1840 1857
1841 if (rt2x00_intf_is_usb(rt2x00dev)) { 1858 if (rt2x00_intf_is_usb(rt2x00dev)) {
1842 struct rt2x00_chip *chip = &rt2x00dev->chip;
1843
1844 /* 1859 /*
1845 * The check for rt2860 is not a typo, some rt2870 hardware 1860 * The check for rt2860 is not a typo, some rt2870 hardware
1846 * identifies itself as rt2860 in the CSR register. 1861 * identifies itself as rt2860 in the CSR register.
1847 */ 1862 */
1848 if (rt2x00_check_rev(chip, 0xfff00000, 0x28600000) || 1863 if (rt2x00_check_rev(rt2x00dev, 0xfff00000, 0x28600000) ||
1849 rt2x00_check_rev(chip, 0xfff00000, 0x28700000) || 1864 rt2x00_check_rev(rt2x00dev, 0xfff00000, 0x28700000) ||
1850 rt2x00_check_rev(chip, 0xfff00000, 0x28800000)) { 1865 rt2x00_check_rev(rt2x00dev, 0xfff00000, 0x28800000)) {
1851 rt2x00_set_chip_rt(rt2x00dev, RT2870); 1866 rt2x00_set_chip_rt(rt2x00dev, RT2870);
1852 } else if (rt2x00_check_rev(chip, 0xffff0000, 0x30700000)) { 1867 } else if (rt2x00_check_rev(rt2x00dev, 0xffff0000, 0x30700000)) {
1853 rt2x00_set_chip_rt(rt2x00dev, RT3070); 1868 rt2x00_set_chip_rt(rt2x00dev, RT3070);
1854 } else { 1869 } else {
1855 ERROR(rt2x00dev, "Invalid RT chipset detected.\n"); 1870 ERROR(rt2x00dev, "Invalid RT chipset detected.\n");
@@ -1858,14 +1873,15 @@ int rt2800_init_eeprom(struct rt2x00_dev *rt2x00dev)
1858 } 1873 }
1859 rt2x00_print_chip(rt2x00dev); 1874 rt2x00_print_chip(rt2x00dev);
1860 1875
1861 if (!rt2x00_rf(&rt2x00dev->chip, RF2820) && 1876 if (!rt2x00_rf(rt2x00dev, RF2820) &&
1862 !rt2x00_rf(&rt2x00dev->chip, RF2850) && 1877 !rt2x00_rf(rt2x00dev, RF2850) &&
1863 !rt2x00_rf(&rt2x00dev->chip, RF2720) && 1878 !rt2x00_rf(rt2x00dev, RF2720) &&
1864 !rt2x00_rf(&rt2x00dev->chip, RF2750) && 1879 !rt2x00_rf(rt2x00dev, RF2750) &&
1865 !rt2x00_rf(&rt2x00dev->chip, RF3020) && 1880 !rt2x00_rf(rt2x00dev, RF3020) &&
1866 !rt2x00_rf(&rt2x00dev->chip, RF2020) && 1881 !rt2x00_rf(rt2x00dev, RF2020) &&
1867 !rt2x00_rf(&rt2x00dev->chip, RF3021) && 1882 !rt2x00_rf(rt2x00dev, RF3021) &&
1868 !rt2x00_rf(&rt2x00dev->chip, RF3022)) { 1883 !rt2x00_rf(rt2x00dev, RF3022) &&
1884 !rt2x00_rf(rt2x00dev, RF3052)) {
1869 ERROR(rt2x00dev, "Invalid RF chipset detected.\n"); 1885 ERROR(rt2x00dev, "Invalid RF chipset detected.\n");
1870 return -ENODEV; 1886 return -ENODEV;
1871 } 1887 }
@@ -2013,7 +2029,6 @@ static const struct rf_channel rf_vals_302x[] = {
2013 2029
2014int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev) 2030int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
2015{ 2031{
2016 struct rt2x00_chip *chip = &rt2x00dev->chip;
2017 struct hw_mode_spec *spec = &rt2x00dev->spec; 2032 struct hw_mode_spec *spec = &rt2x00dev->spec;
2018 struct channel_info *info; 2033 struct channel_info *info;
2019 char *tx_power1; 2034 char *tx_power1;
@@ -2049,19 +2064,19 @@ int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
2049 spec->supported_bands = SUPPORT_BAND_2GHZ; 2064 spec->supported_bands = SUPPORT_BAND_2GHZ;
2050 spec->supported_rates = SUPPORT_RATE_CCK | SUPPORT_RATE_OFDM; 2065 spec->supported_rates = SUPPORT_RATE_CCK | SUPPORT_RATE_OFDM;
2051 2066
2052 if (rt2x00_rf(chip, RF2820) || 2067 if (rt2x00_rf(rt2x00dev, RF2820) ||
2053 rt2x00_rf(chip, RF2720) || 2068 rt2x00_rf(rt2x00dev, RF2720) ||
2054 (rt2x00_intf_is_pci(rt2x00dev) && rt2x00_rf(chip, RF3052))) { 2069 rt2x00_rf(rt2x00dev, RF3052)) {
2055 spec->num_channels = 14; 2070 spec->num_channels = 14;
2056 spec->channels = rf_vals; 2071 spec->channels = rf_vals;
2057 } else if (rt2x00_rf(chip, RF2850) || rt2x00_rf(chip, RF2750)) { 2072 } else if (rt2x00_rf(rt2x00dev, RF2850) || rt2x00_rf(rt2x00dev, RF2750)) {
2058 spec->supported_bands |= SUPPORT_BAND_5GHZ; 2073 spec->supported_bands |= SUPPORT_BAND_5GHZ;
2059 spec->num_channels = ARRAY_SIZE(rf_vals); 2074 spec->num_channels = ARRAY_SIZE(rf_vals);
2060 spec->channels = rf_vals; 2075 spec->channels = rf_vals;
2061 } else if (rt2x00_rf(chip, RF3020) || 2076 } else if (rt2x00_rf(rt2x00dev, RF3020) ||
2062 rt2x00_rf(chip, RF2020) || 2077 rt2x00_rf(rt2x00dev, RF2020) ||
2063 rt2x00_rf(chip, RF3021) || 2078 rt2x00_rf(rt2x00dev, RF3021) ||
2064 rt2x00_rf(chip, RF3022)) { 2079 rt2x00_rf(rt2x00dev, RF3022)) {
2065 spec->num_channels = ARRAY_SIZE(rf_vals_302x); 2080 spec->num_channels = ARRAY_SIZE(rf_vals_302x);
2066 spec->channels = rf_vals_302x; 2081 spec->channels = rf_vals_302x;
2067 } 2082 }
@@ -2069,7 +2084,7 @@ int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
2069 /* 2084 /*
2070 * Initialize HT information. 2085 * Initialize HT information.
2071 */ 2086 */
2072 if (!rt2x00_rf(chip, RF2020)) 2087 if (!rt2x00_rf(rt2x00dev, RF2020))
2073 spec->ht.ht_supported = true; 2088 spec->ht.ht_supported = true;
2074 else 2089 else
2075 spec->ht.ht_supported = false; 2090 spec->ht.ht_supported = false;
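
Most of the rt2800lib.c churn above is one mechanical API switch: the chip-identification helpers (rt2x00_rt, rt2x00_rf, rt2x00_rev, rt2x00_check_rev) now take the rt2x00_dev itself instead of a pointer to its embedded rt2x00_chip, so call sites drop the &rt2x00dev->chip indirection and the local "chip" temporaries disappear. A small standalone mock of the new accessor shape (hypothetical structures and field values, not the driver's own definitions):

/* Standalone mock of the new accessor style: helpers take the device and
 * dereference its embedded chip structure internally. Values are made up. */
#include <stdio.h>
#include <stdint.h>

struct rt2x00_chip { uint16_t rt, rf; uint32_t rev; };
struct rt2x00_dev  { struct rt2x00_chip chip; };

static int rt2x00_rf(struct rt2x00_dev *rt2x00dev, uint16_t rf)
{
        return rt2x00dev->chip.rf == rf;
}

static uint32_t rt2x00_rev(struct rt2x00_dev *rt2x00dev)
{
        return rt2x00dev->chip.rev;
}

int main(void)
{
        struct rt2x00_dev dev = { .chip = { .rt = 0x2870, .rf = 0x0005, .rev = 0x30700000 } };

        /* Callers now pass the device directly, no &dev.chip indirection. */
        printf("rf match: %d, rev: 0x%08x\n", rt2x00_rf(&dev, 0x0005), rt2x00_rev(&dev));
        return 0;
}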
diff --git a/drivers/net/wireless/rt2x00/rt2800lib.h b/drivers/net/wireless/rt2x00/rt2800lib.h
index 535ce22f2ac..ebabeae62d1 100644
--- a/drivers/net/wireless/rt2x00/rt2800lib.h
+++ b/drivers/net/wireless/rt2x00/rt2800lib.h
@@ -114,8 +114,6 @@ void rt2800_mcu_request(struct rt2x00_dev *rt2x00dev,
114extern const struct rt2x00debug rt2800_rt2x00debug; 114extern const struct rt2x00debug rt2800_rt2x00debug;
115 115
116int rt2800_rfkill_poll(struct rt2x00_dev *rt2x00dev); 116int rt2800_rfkill_poll(struct rt2x00_dev *rt2x00dev);
117void rt2800_init_led(struct rt2x00_dev *rt2x00dev,
118 struct rt2x00_led *led, enum led_type type);
119int rt2800_config_shared_key(struct rt2x00_dev *rt2x00dev, 117int rt2800_config_shared_key(struct rt2x00_dev *rt2x00dev,
120 struct rt2x00lib_crypto *crypto, 118 struct rt2x00lib_crypto *crypto,
121 struct ieee80211_key_conf *key); 119 struct ieee80211_key_conf *key);
@@ -139,6 +137,7 @@ void rt2800_link_tuner(struct rt2x00_dev *rt2x00dev, struct link_qual *qual,
139int rt2800_init_registers(struct rt2x00_dev *rt2x00dev); 137int rt2800_init_registers(struct rt2x00_dev *rt2x00dev);
140int rt2800_init_bbp(struct rt2x00_dev *rt2x00dev); 138int rt2800_init_bbp(struct rt2x00_dev *rt2x00dev);
141int rt2800_init_rfcsr(struct rt2x00_dev *rt2x00dev); 139int rt2800_init_rfcsr(struct rt2x00_dev *rt2x00dev);
140int rt2800_wait_wpdma_ready(struct rt2x00_dev *rt2x00dev);
142 141
143int rt2800_efuse_detect(struct rt2x00_dev *rt2x00dev); 142int rt2800_efuse_detect(struct rt2x00_dev *rt2x00dev);
144void rt2800_read_eeprom_efuse(struct rt2x00_dev *rt2x00dev); 143void rt2800_read_eeprom_efuse(struct rt2x00_dev *rt2x00dev);
diff --git a/drivers/net/wireless/rt2x00/rt2800pci.c b/drivers/net/wireless/rt2x00/rt2800pci.c
index dfc886fcb44..d64181cbc9c 100644
--- a/drivers/net/wireless/rt2x00/rt2800pci.c
+++ b/drivers/net/wireless/rt2x00/rt2800pci.c
@@ -48,14 +48,6 @@
48#include "rt2800.h" 48#include "rt2800.h"
49#include "rt2800pci.h" 49#include "rt2800pci.h"
50 50
51#ifdef CONFIG_RT2800PCI_PCI_MODULE
52#define CONFIG_RT2800PCI_PCI
53#endif
54
55#ifdef CONFIG_RT2800PCI_WISOC_MODULE
56#define CONFIG_RT2800PCI_WISOC
57#endif
58
59/* 51/*
60 * Allow hardware encryption to be disabled. 52 * Allow hardware encryption to be disabled.
61 */ 53 */
@@ -87,7 +79,7 @@ static void rt2800pci_mcu_status(struct rt2x00_dev *rt2x00dev, const u8 token)
87 rt2800_register_write(rt2x00dev, H2M_MAILBOX_CID, ~0); 79 rt2800_register_write(rt2x00dev, H2M_MAILBOX_CID, ~0);
88} 80}
89 81
90#ifdef CONFIG_RT2800PCI_WISOC 82#ifdef CONFIG_RT2800PCI_SOC
91static void rt2800pci_read_eeprom_soc(struct rt2x00_dev *rt2x00dev) 83static void rt2800pci_read_eeprom_soc(struct rt2x00_dev *rt2x00dev)
92{ 84{
93 u32 *base_addr = (u32 *) KSEG1ADDR(0x1F040000); /* XXX for RT3052 */ 85 u32 *base_addr = (u32 *) KSEG1ADDR(0x1F040000); /* XXX for RT3052 */
@@ -98,7 +90,7 @@ static void rt2800pci_read_eeprom_soc(struct rt2x00_dev *rt2x00dev)
98static inline void rt2800pci_read_eeprom_soc(struct rt2x00_dev *rt2x00dev) 90static inline void rt2800pci_read_eeprom_soc(struct rt2x00_dev *rt2x00dev)
99{ 91{
100} 92}
101#endif /* CONFIG_RT2800PCI_WISOC */ 93#endif /* CONFIG_RT2800PCI_SOC */
102 94
103#ifdef CONFIG_RT2800PCI_PCI 95#ifdef CONFIG_RT2800PCI_PCI
104static void rt2800pci_eepromregister_read(struct eeprom_93cx6 *eeprom) 96static void rt2800pci_eepromregister_read(struct eeprom_93cx6 *eeprom)
@@ -461,24 +453,6 @@ static void rt2800pci_toggle_irq(struct rt2x00_dev *rt2x00dev,
461 rt2800_register_write(rt2x00dev, INT_MASK_CSR, reg); 453 rt2800_register_write(rt2x00dev, INT_MASK_CSR, reg);
462} 454}
463 455
464static int rt2800pci_wait_wpdma_ready(struct rt2x00_dev *rt2x00dev)
465{
466 unsigned int i;
467 u32 reg;
468
469 for (i = 0; i < REGISTER_BUSY_COUNT; i++) {
470 rt2800_register_read(rt2x00dev, WPDMA_GLO_CFG, &reg);
471 if (!rt2x00_get_field32(reg, WPDMA_GLO_CFG_TX_DMA_BUSY) &&
472 !rt2x00_get_field32(reg, WPDMA_GLO_CFG_RX_DMA_BUSY))
473 return 0;
474
475 msleep(1);
476 }
477
478 ERROR(rt2x00dev, "WPDMA TX/RX busy, aborting.\n");
479 return -EACCES;
480}
481
482static int rt2800pci_enable_radio(struct rt2x00_dev *rt2x00dev) 456static int rt2800pci_enable_radio(struct rt2x00_dev *rt2x00dev)
483{ 457{
484 u32 reg; 458 u32 reg;
@@ -487,10 +461,10 @@ static int rt2800pci_enable_radio(struct rt2x00_dev *rt2x00dev)
487 /* 461 /*
488 * Initialize all registers. 462 * Initialize all registers.
489 */ 463 */
490 if (unlikely(rt2800pci_wait_wpdma_ready(rt2x00dev) || 464 if (unlikely(rt2800_wait_wpdma_ready(rt2x00dev) ||
491 rt2800pci_init_queues(rt2x00dev) || 465 rt2800pci_init_queues(rt2x00dev) ||
492 rt2800_init_registers(rt2x00dev) || 466 rt2800_init_registers(rt2x00dev) ||
493 rt2800pci_wait_wpdma_ready(rt2x00dev) || 467 rt2800_wait_wpdma_ready(rt2x00dev) ||
494 rt2800_init_bbp(rt2x00dev) || 468 rt2800_init_bbp(rt2x00dev) ||
495 rt2800_init_rfcsr(rt2x00dev))) 469 rt2800_init_rfcsr(rt2x00dev)))
496 return -EIO; 470 return -EIO;
@@ -570,7 +544,7 @@ static void rt2800pci_disable_radio(struct rt2x00_dev *rt2x00dev)
570 rt2800_register_write(rt2x00dev, PBF_SYS_CTRL, 0x00000e00); 544 rt2800_register_write(rt2x00dev, PBF_SYS_CTRL, 0x00000e00);
571 545
572 /* Wait for DMA, ignore error */ 546 /* Wait for DMA, ignore error */
573 rt2800pci_wait_wpdma_ready(rt2x00dev); 547 rt2800_wait_wpdma_ready(rt2x00dev);
574} 548}
575 549
576static int rt2800pci_set_state(struct rt2x00_dev *rt2x00dev, 550static int rt2800pci_set_state(struct rt2x00_dev *rt2x00dev,
@@ -835,7 +809,6 @@ static void rt2800pci_fill_rxdone(struct queue_entry *entry,
835 struct rxdone_entry_desc *rxdesc) 809 struct rxdone_entry_desc *rxdesc)
836{ 810{
837 struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev; 811 struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
838 struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb);
839 struct queue_entry_priv_pci *entry_priv = entry->priv_data; 812 struct queue_entry_priv_pci *entry_priv = entry->priv_data;
840 __le32 *rxd = entry_priv->desc; 813 __le32 *rxd = entry_priv->desc;
841 __le32 *rxwi = (__le32 *)entry->skb->data; 814 __le32 *rxwi = (__le32 *)entry->skb->data;
@@ -883,10 +856,8 @@ static void rt2800pci_fill_rxdone(struct queue_entry *entry,
883 if (rt2x00_get_field32(rxd3, RXD_W3_MY_BSS)) 856 if (rt2x00_get_field32(rxd3, RXD_W3_MY_BSS))
884 rxdesc->dev_flags |= RXDONE_MY_BSS; 857 rxdesc->dev_flags |= RXDONE_MY_BSS;
885 858
886 if (rt2x00_get_field32(rxd3, RXD_W3_L2PAD)) { 859 if (rt2x00_get_field32(rxd3, RXD_W3_L2PAD))
887 rxdesc->dev_flags |= RXDONE_L2PAD; 860 rxdesc->dev_flags |= RXDONE_L2PAD;
888 skbdesc->flags |= SKBDESC_L2_PADDED;
889 }
890 861
891 if (rt2x00_get_field32(rxwi1, RXWI_W1_SHORT_GI)) 862 if (rt2x00_get_field32(rxwi1, RXWI_W1_SHORT_GI))
892 rxdesc->flags |= RX_FLAG_SHORT_GI; 863 rxdesc->flags |= RX_FLAG_SHORT_GI;
@@ -927,7 +898,6 @@ static void rt2800pci_fill_rxdone(struct queue_entry *entry,
927 * Remove TXWI descriptor from start of buffer. 898 * Remove TXWI descriptor from start of buffer.
928 */ 899 */
929 skb_pull(entry->skb, RXWI_DESC_SIZE); 900 skb_pull(entry->skb, RXWI_DESC_SIZE);
930 skb_trim(entry->skb, rxdesc->size);
931} 901}
932 902
933/* 903/*
@@ -1133,8 +1103,7 @@ static int rt2800pci_probe_hw(struct rt2x00_dev *rt2x00dev)
1133 /* 1103 /*
1134 * This device requires firmware. 1104 * This device requires firmware.
1135 */ 1105 */
1136 if (!rt2x00_rt(&rt2x00dev->chip, RT2880) && 1106 if (!rt2x00_rt(rt2x00dev, RT2880) && !rt2x00_rt(rt2x00dev, RT3052))
1137 !rt2x00_rt(&rt2x00dev->chip, RT3052))
1138 __set_bit(DRIVER_REQUIRE_FIRMWARE, &rt2x00dev->flags); 1107 __set_bit(DRIVER_REQUIRE_FIRMWARE, &rt2x00dev->flags);
1139 __set_bit(DRIVER_REQUIRE_DMA, &rt2x00dev->flags); 1108 __set_bit(DRIVER_REQUIRE_DMA, &rt2x00dev->flags);
1140 __set_bit(DRIVER_REQUIRE_L2PAD, &rt2x00dev->flags); 1109 __set_bit(DRIVER_REQUIRE_L2PAD, &rt2x00dev->flags);
@@ -1221,7 +1190,7 @@ static const struct rt2x00_ops rt2800pci_ops = {
1221/* 1190/*
1222 * RT2800pci module information. 1191 * RT2800pci module information.
1223 */ 1192 */
1224static struct pci_device_id rt2800pci_device_table[] = { 1193static DEFINE_PCI_DEVICE_TABLE(rt2800pci_device_table) = {
1225 { PCI_DEVICE(0x1462, 0x891a), PCI_DEVICE_DATA(&rt2800pci_ops) }, 1194 { PCI_DEVICE(0x1462, 0x891a), PCI_DEVICE_DATA(&rt2800pci_ops) },
1226 { PCI_DEVICE(0x1432, 0x7708), PCI_DEVICE_DATA(&rt2800pci_ops) }, 1195 { PCI_DEVICE(0x1432, 0x7708), PCI_DEVICE_DATA(&rt2800pci_ops) },
1227 { PCI_DEVICE(0x1432, 0x7727), PCI_DEVICE_DATA(&rt2800pci_ops) }, 1196 { PCI_DEVICE(0x1432, 0x7727), PCI_DEVICE_DATA(&rt2800pci_ops) },
@@ -1255,7 +1224,7 @@ MODULE_DEVICE_TABLE(pci, rt2800pci_device_table);
1255#endif /* CONFIG_RT2800PCI_PCI */ 1224#endif /* CONFIG_RT2800PCI_PCI */
1256MODULE_LICENSE("GPL"); 1225MODULE_LICENSE("GPL");
1257 1226
1258#ifdef CONFIG_RT2800PCI_WISOC 1227#ifdef CONFIG_RT2800PCI_SOC
1259#if defined(CONFIG_RALINK_RT288X) 1228#if defined(CONFIG_RALINK_RT288X)
1260__rt2x00soc_probe(RT2880, &rt2800pci_ops); 1229__rt2x00soc_probe(RT2880, &rt2800pci_ops);
1261#elif defined(CONFIG_RALINK_RT305X) 1230#elif defined(CONFIG_RALINK_RT305X)
@@ -1273,7 +1242,7 @@ static struct platform_driver rt2800soc_driver = {
1273 .suspend = rt2x00soc_suspend, 1242 .suspend = rt2x00soc_suspend,
1274 .resume = rt2x00soc_resume, 1243 .resume = rt2x00soc_resume,
1275}; 1244};
1276#endif /* CONFIG_RT2800PCI_WISOC */ 1245#endif /* CONFIG_RT2800PCI_SOC */
1277 1246
1278#ifdef CONFIG_RT2800PCI_PCI 1247#ifdef CONFIG_RT2800PCI_PCI
1279static struct pci_driver rt2800pci_driver = { 1248static struct pci_driver rt2800pci_driver = {
@@ -1290,7 +1259,7 @@ static int __init rt2800pci_init(void)
1290{ 1259{
1291 int ret = 0; 1260 int ret = 0;
1292 1261
1293#ifdef CONFIG_RT2800PCI_WISOC 1262#ifdef CONFIG_RT2800PCI_SOC
1294 ret = platform_driver_register(&rt2800soc_driver); 1263 ret = platform_driver_register(&rt2800soc_driver);
1295 if (ret) 1264 if (ret)
1296 return ret; 1265 return ret;
@@ -1298,7 +1267,7 @@ static int __init rt2800pci_init(void)
1298#ifdef CONFIG_RT2800PCI_PCI 1267#ifdef CONFIG_RT2800PCI_PCI
1299 ret = pci_register_driver(&rt2800pci_driver); 1268 ret = pci_register_driver(&rt2800pci_driver);
1300 if (ret) { 1269 if (ret) {
1301#ifdef CONFIG_RT2800PCI_WISOC 1270#ifdef CONFIG_RT2800PCI_SOC
1302 platform_driver_unregister(&rt2800soc_driver); 1271 platform_driver_unregister(&rt2800soc_driver);
1303#endif 1272#endif
1304 return ret; 1273 return ret;
@@ -1313,7 +1282,7 @@ static void __exit rt2800pci_exit(void)
1313#ifdef CONFIG_RT2800PCI_PCI 1282#ifdef CONFIG_RT2800PCI_PCI
1314 pci_unregister_driver(&rt2800pci_driver); 1283 pci_unregister_driver(&rt2800pci_driver);
1315#endif 1284#endif
1316#ifdef CONFIG_RT2800PCI_WISOC 1285#ifdef CONFIG_RT2800PCI_SOC
1317 platform_driver_unregister(&rt2800soc_driver); 1286 platform_driver_unregister(&rt2800soc_driver);
1318#endif 1287#endif
1319} 1288}
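
rt2800pci.c loses its private WPDMA busy-wait here, and rt2800usb.c drops an identical copy further down; both buses now call a shared rt2800_wait_wpdma_ready(), which the rt2800lib.h hunk above starts exporting. A sketch of the shared helper, assuming it keeps the body of the per-bus copies removed in this patch:

/* Sketch only: the consolidated helper, presumed to live in rt2800lib.c
 * with the same body as the deleted rt2800pci/rt2800usb versions. */
int rt2800_wait_wpdma_ready(struct rt2x00_dev *rt2x00dev)
{
        unsigned int i;
        u32 reg;

        /* Poll until both the TX and RX DMA engines report idle. */
        for (i = 0; i < REGISTER_BUSY_COUNT; i++) {
                rt2800_register_read(rt2x00dev, WPDMA_GLO_CFG, &reg);
                if (!rt2x00_get_field32(reg, WPDMA_GLO_CFG_TX_DMA_BUSY) &&
                    !rt2x00_get_field32(reg, WPDMA_GLO_CFG_RX_DMA_BUSY))
                        return 0;

                msleep(1);
        }

        ERROR(rt2x00dev, "WPDMA TX/RX busy, aborting.\n");
        return -EACCES;
}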
diff --git a/drivers/net/wireless/rt2x00/rt2800usb.c b/drivers/net/wireless/rt2x00/rt2800usb.c
index ab95346cf6a..82755cf8b73 100644
--- a/drivers/net/wireless/rt2x00/rt2800usb.c
+++ b/drivers/net/wireless/rt2x00/rt2800usb.c
@@ -92,7 +92,7 @@ static bool rt2800usb_check_crc(const u8 *data, const size_t len)
92static int rt2800usb_check_firmware(struct rt2x00_dev *rt2x00dev, 92static int rt2800usb_check_firmware(struct rt2x00_dev *rt2x00dev,
93 const u8 *data, const size_t len) 93 const u8 *data, const size_t len)
94{ 94{
95 u16 chipset = (rt2x00_rev(&rt2x00dev->chip) >> 16) & 0xffff; 95 u16 chipset = (rt2x00_rev(rt2x00dev) >> 16) & 0xffff;
96 size_t offset = 0; 96 size_t offset = 0;
97 97
98 /* 98 /*
@@ -138,7 +138,7 @@ static int rt2800usb_load_firmware(struct rt2x00_dev *rt2x00dev,
138 u32 reg; 138 u32 reg;
139 u32 offset; 139 u32 offset;
140 u32 length; 140 u32 length;
141 u16 chipset = (rt2x00_rev(&rt2x00dev->chip) >> 16) & 0xffff; 141 u16 chipset = (rt2x00_rev(rt2x00dev) >> 16) & 0xffff;
142 142
143 /* 143 /*
144 * Check which section of the firmware we need. 144 * Check which section of the firmware we need.
@@ -248,24 +248,6 @@ static void rt2800usb_toggle_rx(struct rt2x00_dev *rt2x00dev,
248 rt2800_register_write(rt2x00dev, MAC_SYS_CTRL, reg); 248 rt2800_register_write(rt2x00dev, MAC_SYS_CTRL, reg);
249} 249}
250 250
251static int rt2800usb_wait_wpdma_ready(struct rt2x00_dev *rt2x00dev)
252{
253 unsigned int i;
254 u32 reg;
255
256 for (i = 0; i < REGISTER_BUSY_COUNT; i++) {
257 rt2800_register_read(rt2x00dev, WPDMA_GLO_CFG, &reg);
258 if (!rt2x00_get_field32(reg, WPDMA_GLO_CFG_TX_DMA_BUSY) &&
259 !rt2x00_get_field32(reg, WPDMA_GLO_CFG_RX_DMA_BUSY))
260 return 0;
261
262 msleep(1);
263 }
264
265 ERROR(rt2x00dev, "WPDMA TX/RX busy, aborting.\n");
266 return -EACCES;
267}
268
269static int rt2800usb_enable_radio(struct rt2x00_dev *rt2x00dev) 251static int rt2800usb_enable_radio(struct rt2x00_dev *rt2x00dev)
270{ 252{
271 u32 reg; 253 u32 reg;
@@ -274,7 +256,7 @@ static int rt2800usb_enable_radio(struct rt2x00_dev *rt2x00dev)
274 /* 256 /*
275 * Initialize all registers. 257 * Initialize all registers.
276 */ 258 */
277 if (unlikely(rt2800usb_wait_wpdma_ready(rt2x00dev) || 259 if (unlikely(rt2800_wait_wpdma_ready(rt2x00dev) ||
278 rt2800_init_registers(rt2x00dev) || 260 rt2800_init_registers(rt2x00dev) ||
279 rt2800_init_bbp(rt2x00dev) || 261 rt2800_init_bbp(rt2x00dev) ||
280 rt2800_init_rfcsr(rt2x00dev))) 262 rt2800_init_rfcsr(rt2x00dev)))
@@ -295,9 +277,7 @@ static int rt2800usb_enable_radio(struct rt2x00_dev *rt2x00dev)
295 277
296 rt2800_register_read(rt2x00dev, USB_DMA_CFG, &reg); 278 rt2800_register_read(rt2x00dev, USB_DMA_CFG, &reg);
297 rt2x00_set_field32(&reg, USB_DMA_CFG_PHY_CLEAR, 0); 279 rt2x00_set_field32(&reg, USB_DMA_CFG_PHY_CLEAR, 0);
298 /* Don't use bulk in aggregation when working with USB 1.1 */ 280 rt2x00_set_field32(&reg, USB_DMA_CFG_RX_BULK_AGG_EN, 0);
299 rt2x00_set_field32(&reg, USB_DMA_CFG_RX_BULK_AGG_EN,
300 (rt2x00dev->rx->usb_maxpacket == 512));
301 rt2x00_set_field32(&reg, USB_DMA_CFG_RX_BULK_AGG_TIMEOUT, 128); 281 rt2x00_set_field32(&reg, USB_DMA_CFG_RX_BULK_AGG_TIMEOUT, 128);
302 /* 282 /*
303 * Total room for RX frames in kilobytes, PBF might still exceed 283 * Total room for RX frames in kilobytes, PBF might still exceed
@@ -346,7 +326,7 @@ static void rt2800usb_disable_radio(struct rt2x00_dev *rt2x00dev)
346 rt2800_register_write(rt2x00dev, TX_PIN_CFG, 0); 326 rt2800_register_write(rt2x00dev, TX_PIN_CFG, 0);
347 327
348 /* Wait for DMA, ignore error */ 328 /* Wait for DMA, ignore error */
349 rt2800usb_wait_wpdma_ready(rt2x00dev); 329 rt2800_wait_wpdma_ready(rt2x00dev);
350 330
351 rt2x00usb_disable_radio(rt2x00dev); 331 rt2x00usb_disable_radio(rt2x00dev);
352} 332}
@@ -573,41 +553,57 @@ static void rt2800usb_fill_rxdone(struct queue_entry *entry,
573{ 553{
574 struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev; 554 struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
575 struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb); 555 struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb);
576 __le32 *rxd = (__le32 *)entry->skb->data; 556 __le32 *rxi = (__le32 *)entry->skb->data;
577 __le32 *rxwi; 557 __le32 *rxwi;
578 u32 rxd0; 558 __le32 *rxd;
559 u32 rxi0;
579 u32 rxwi0; 560 u32 rxwi0;
580 u32 rxwi1; 561 u32 rxwi1;
581 u32 rxwi2; 562 u32 rxwi2;
582 u32 rxwi3; 563 u32 rxwi3;
564 u32 rxd0;
565 int rx_pkt_len;
566
567 /*
568 * RX frame format is :
569 * | RXINFO | RXWI | header | L2 pad | payload | pad | RXD | USB pad |
570 * |<------------ rx_pkt_len -------------->|
571 */
572 rt2x00_desc_read(rxi, 0, &rxi0);
573 rx_pkt_len = rt2x00_get_field32(rxi0, RXINFO_W0_USB_DMA_RX_PKT_LEN);
574
575 rxwi = (__le32 *)(entry->skb->data + RXINFO_DESC_SIZE);
576
577 /*
578 * FIXME : we need to check for rx_pkt_len validity
579 */
580 rxd = (__le32 *)(entry->skb->data + RXINFO_DESC_SIZE + rx_pkt_len);
583 581
584 /* 582 /*
585 * Copy descriptor to the skbdesc->desc buffer, making it safe from 583 * Copy descriptor to the skbdesc->desc buffer, making it safe from
586 * moving of frame data in rt2x00usb. 584 * moving of frame data in rt2x00usb.
587 */ 585 */
588 memcpy(skbdesc->desc, rxd, skbdesc->desc_len); 586 memcpy(skbdesc->desc, rxi, skbdesc->desc_len);
589 rxd = (__le32 *)skbdesc->desc;
590 rxwi = &rxd[RXINFO_DESC_SIZE / sizeof(__le32)];
591 587
592 /* 588 /*
593 * It is now safe to read the descriptor on all architectures. 589 * It is now safe to read the descriptor on all architectures.
594 */ 590 */
595 rt2x00_desc_read(rxd, 0, &rxd0);
596 rt2x00_desc_read(rxwi, 0, &rxwi0); 591 rt2x00_desc_read(rxwi, 0, &rxwi0);
597 rt2x00_desc_read(rxwi, 1, &rxwi1); 592 rt2x00_desc_read(rxwi, 1, &rxwi1);
598 rt2x00_desc_read(rxwi, 2, &rxwi2); 593 rt2x00_desc_read(rxwi, 2, &rxwi2);
599 rt2x00_desc_read(rxwi, 3, &rxwi3); 594 rt2x00_desc_read(rxwi, 3, &rxwi3);
595 rt2x00_desc_read(rxd, 0, &rxd0);
600 596
601 if (rt2x00_get_field32(rxd0, RXINFO_W0_CRC_ERROR)) 597 if (rt2x00_get_field32(rxd0, RXD_W0_CRC_ERROR))
602 rxdesc->flags |= RX_FLAG_FAILED_FCS_CRC; 598 rxdesc->flags |= RX_FLAG_FAILED_FCS_CRC;
603 599
604 if (test_bit(CONFIG_SUPPORT_HW_CRYPTO, &rt2x00dev->flags)) { 600 if (test_bit(CONFIG_SUPPORT_HW_CRYPTO, &rt2x00dev->flags)) {
605 rxdesc->cipher = rt2x00_get_field32(rxwi0, RXWI_W0_UDF); 601 rxdesc->cipher = rt2x00_get_field32(rxwi0, RXWI_W0_UDF);
606 rxdesc->cipher_status = 602 rxdesc->cipher_status =
607 rt2x00_get_field32(rxd0, RXINFO_W0_CIPHER_ERROR); 603 rt2x00_get_field32(rxd0, RXD_W0_CIPHER_ERROR);
608 } 604 }
609 605
610 if (rt2x00_get_field32(rxd0, RXINFO_W0_DECRYPTED)) { 606 if (rt2x00_get_field32(rxd0, RXD_W0_DECRYPTED)) {
611 /* 607 /*
612 * Hardware has stripped IV/EIV data from 802.11 frame during 608 * Hardware has stripped IV/EIV data from 802.11 frame during
613 * decryption. Unfortunately the descriptor doesn't contain 609 * decryption. Unfortunately the descriptor doesn't contain
@@ -622,13 +618,11 @@ static void rt2800usb_fill_rxdone(struct queue_entry *entry,
622 rxdesc->flags |= RX_FLAG_MMIC_ERROR; 618 rxdesc->flags |= RX_FLAG_MMIC_ERROR;
623 } 619 }
624 620
625 if (rt2x00_get_field32(rxd0, RXINFO_W0_MY_BSS)) 621 if (rt2x00_get_field32(rxd0, RXD_W0_MY_BSS))
626 rxdesc->dev_flags |= RXDONE_MY_BSS; 622 rxdesc->dev_flags |= RXDONE_MY_BSS;
627 623
628 if (rt2x00_get_field32(rxd0, RXINFO_W0_L2PAD)) { 624 if (rt2x00_get_field32(rxd0, RXD_W0_L2PAD))
629 rxdesc->dev_flags |= RXDONE_L2PAD; 625 rxdesc->dev_flags |= RXDONE_L2PAD;
630 skbdesc->flags |= SKBDESC_L2_PADDED;
631 }
632 626
633 if (rt2x00_get_field32(rxwi1, RXWI_W1_SHORT_GI)) 627 if (rt2x00_get_field32(rxwi1, RXWI_W1_SHORT_GI))
634 rxdesc->flags |= RX_FLAG_SHORT_GI; 628 rxdesc->flags |= RX_FLAG_SHORT_GI;
@@ -663,7 +657,6 @@ static void rt2800usb_fill_rxdone(struct queue_entry *entry,
663 * Remove RXWI descriptor from start of buffer. 657 * Remove RXWI descriptor from start of buffer.
664 */ 658 */
665 skb_pull(entry->skb, skbdesc->desc_len); 659 skb_pull(entry->skb, skbdesc->desc_len);
666 skb_trim(entry->skb, rxdesc->size);
667} 660}
668 661
669/* 662/*
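
The reworked rt2800usb_fill_rxdone() above no longer treats the copied descriptor as the single source of truth; it walks the URB buffer in place: RXINFO at the front, the RXWI right behind it, and the RXD trailer at offset RXINFO_DESC_SIZE + rx_pkt_len, where rx_pkt_len is read from RXINFO word 0. A standalone illustration of that offset arithmetic, using the 4-byte RXINFO and 16-byte RXWI sizes from rt2800usb.h and a hypothetical packet length:

/* Standalone illustration of the new rt2800usb RX buffer layout:
 * | RXINFO | RXWI | 802.11 header | L2 pad | payload | pad | RXD | USB pad |
 * rx_pkt_len (from RXINFO word 0) covers everything between RXINFO and RXD. */
#include <stdio.h>
#include <stdint.h>

#define RXINFO_DESC_SIZE (1 * sizeof(uint32_t))  /*  4 bytes */
#define RXWI_DESC_SIZE   (4 * sizeof(uint32_t))  /* 16 bytes */

int main(void)
{
        size_t rx_pkt_len = 120;   /* hypothetical RXINFO_W0_USB_DMA_RX_PKT_LEN value */

        size_t rxwi_offset  = RXINFO_DESC_SIZE;
        size_t frame_offset = RXINFO_DESC_SIZE + RXWI_DESC_SIZE;
        size_t rxd_offset   = RXINFO_DESC_SIZE + rx_pkt_len;

        printf("RXWI  at offset %zu\n", rxwi_offset);
        printf("frame at offset %zu\n", frame_offset);
        printf("RXD   at offset %zu\n", rxd_offset);
        return 0;
}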
diff --git a/drivers/net/wireless/rt2x00/rt2800usb.h b/drivers/net/wireless/rt2x00/rt2800usb.h
index 1e4340a182e..d1d8ae94b4d 100644
--- a/drivers/net/wireless/rt2x00/rt2800usb.h
+++ b/drivers/net/wireless/rt2x00/rt2800usb.h
@@ -79,6 +79,8 @@
79 */ 79 */
80#define TXINFO_DESC_SIZE ( 1 * sizeof(__le32) ) 80#define TXINFO_DESC_SIZE ( 1 * sizeof(__le32) )
81#define RXINFO_DESC_SIZE ( 1 * sizeof(__le32) ) 81#define RXINFO_DESC_SIZE ( 1 * sizeof(__le32) )
82#define RXWI_DESC_SIZE ( 4 * sizeof(__le32) )
83#define RXD_DESC_SIZE ( 1 * sizeof(__le32) )
82 84
83/* 85/*
84 * TX Info structure 86 * TX Info structure
@@ -101,6 +103,54 @@
101#define TXINFO_W0_USB_DMA_TX_BURST FIELD32(0x80000000) 103#define TXINFO_W0_USB_DMA_TX_BURST FIELD32(0x80000000)
102 104
103/* 105/*
106 * RX Info structure
107 */
108
109/*
110 * Word 0
111 */
112
113#define RXINFO_W0_USB_DMA_RX_PKT_LEN FIELD32(0x0000ffff)
114
115/*
116 * RX WI structure
117 */
118
119/*
120 * Word0
121 */
122#define RXWI_W0_WIRELESS_CLI_ID FIELD32(0x000000ff)
123#define RXWI_W0_KEY_INDEX FIELD32(0x00000300)
124#define RXWI_W0_BSSID FIELD32(0x00001c00)
125#define RXWI_W0_UDF FIELD32(0x0000e000)
126#define RXWI_W0_MPDU_TOTAL_BYTE_COUNT FIELD32(0x0fff0000)
127#define RXWI_W0_TID FIELD32(0xf0000000)
128
129/*
130 * Word1
131 */
132#define RXWI_W1_FRAG FIELD32(0x0000000f)
133#define RXWI_W1_SEQUENCE FIELD32(0x0000fff0)
134#define RXWI_W1_MCS FIELD32(0x007f0000)
135#define RXWI_W1_BW FIELD32(0x00800000)
136#define RXWI_W1_SHORT_GI FIELD32(0x01000000)
137#define RXWI_W1_STBC FIELD32(0x06000000)
138#define RXWI_W1_PHYMODE FIELD32(0xc0000000)
139
140/*
141 * Word2
142 */
143#define RXWI_W2_RSSI0 FIELD32(0x000000ff)
144#define RXWI_W2_RSSI1 FIELD32(0x0000ff00)
145#define RXWI_W2_RSSI2 FIELD32(0x00ff0000)
146
147/*
148 * Word3
149 */
150#define RXWI_W3_SNR0 FIELD32(0x000000ff)
151#define RXWI_W3_SNR1 FIELD32(0x0000ff00)
152
153/*
104 * RX descriptor format for RX Ring. 154 * RX descriptor format for RX Ring.
105 */ 155 */
106 156
@@ -115,25 +165,25 @@
115 * AMSDU: rx with 802.3 header, not 802.11 header. 165 * AMSDU: rx with 802.3 header, not 802.11 header.
116 */ 166 */
117 167
118#define RXINFO_W0_BA FIELD32(0x00000001) 168#define RXD_W0_BA FIELD32(0x00000001)
119#define RXINFO_W0_DATA FIELD32(0x00000002) 169#define RXD_W0_DATA FIELD32(0x00000002)
120#define RXINFO_W0_NULLDATA FIELD32(0x00000004) 170#define RXD_W0_NULLDATA FIELD32(0x00000004)
121#define RXINFO_W0_FRAG FIELD32(0x00000008) 171#define RXD_W0_FRAG FIELD32(0x00000008)
122#define RXINFO_W0_UNICAST_TO_ME FIELD32(0x00000010) 172#define RXD_W0_UNICAST_TO_ME FIELD32(0x00000010)
123#define RXINFO_W0_MULTICAST FIELD32(0x00000020) 173#define RXD_W0_MULTICAST FIELD32(0x00000020)
124#define RXINFO_W0_BROADCAST FIELD32(0x00000040) 174#define RXD_W0_BROADCAST FIELD32(0x00000040)
125#define RXINFO_W0_MY_BSS FIELD32(0x00000080) 175#define RXD_W0_MY_BSS FIELD32(0x00000080)
126#define RXINFO_W0_CRC_ERROR FIELD32(0x00000100) 176#define RXD_W0_CRC_ERROR FIELD32(0x00000100)
127#define RXINFO_W0_CIPHER_ERROR FIELD32(0x00000600) 177#define RXD_W0_CIPHER_ERROR FIELD32(0x00000600)
128#define RXINFO_W0_AMSDU FIELD32(0x00000800) 178#define RXD_W0_AMSDU FIELD32(0x00000800)
129#define RXINFO_W0_HTC FIELD32(0x00001000) 179#define RXD_W0_HTC FIELD32(0x00001000)
130#define RXINFO_W0_RSSI FIELD32(0x00002000) 180#define RXD_W0_RSSI FIELD32(0x00002000)
131#define RXINFO_W0_L2PAD FIELD32(0x00004000) 181#define RXD_W0_L2PAD FIELD32(0x00004000)
132#define RXINFO_W0_AMPDU FIELD32(0x00008000) 182#define RXD_W0_AMPDU FIELD32(0x00008000)
133#define RXINFO_W0_DECRYPTED FIELD32(0x00010000) 183#define RXD_W0_DECRYPTED FIELD32(0x00010000)
134#define RXINFO_W0_PLCP_RSSI FIELD32(0x00020000) 184#define RXD_W0_PLCP_RSSI FIELD32(0x00020000)
135#define RXINFO_W0_CIPHER_ALG FIELD32(0x00040000) 185#define RXD_W0_CIPHER_ALG FIELD32(0x00040000)
136#define RXINFO_W0_LAST_AMSDU FIELD32(0x00080000) 186#define RXD_W0_LAST_AMSDU FIELD32(0x00080000)
137#define RXINFO_W0_PLCP_SIGNAL FIELD32(0xfff00000) 187#define RXD_W0_PLCP_SIGNAL FIELD32(0xfff00000)
138 188
139#endif /* RT2800USB_H */ 189#endif /* RT2800USB_H */
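
All of the RXD_W0_* and RXWI_W* definitions above are FIELD32() bit masks; the driver reads a field by masking the 32-bit descriptor word and shifting the result down to bit 0, which is roughly what rt2x00_get_field32() does internally. A standalone sketch of that extraction for RXWI_W0_MPDU_TOTAL_BYTE_COUNT (mask 0x0fff0000), with a made-up descriptor word:

/* Standalone sketch of FIELD32-style extraction: mask the word, then shift
 * down by the position of the mask's lowest set bit. Mirrors the driver's
 * rt2x00_get_field32() behaviour; the sample word is hypothetical. */
#include <stdio.h>
#include <stdint.h>

static uint32_t get_field32(uint32_t word, uint32_t mask)
{
        unsigned int shift = 0;

        while (!(mask & (1u << shift)))
                shift++;

        return (word & mask) >> shift;
}

int main(void)
{
        uint32_t rxwi0 = 0x004a1234;   /* hypothetical RXWI word 0 */

        /* MPDU byte count lives in bits 16..27 of RXWI word 0. */
        printf("mpdu_total_byte_count = %u\n", get_field32(rxwi0, 0x0fff0000));
        return 0;
}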
diff --git a/drivers/net/wireless/rt2x00/rt2x00.h b/drivers/net/wireless/rt2x00/rt2x00.h
index dcfc8c25d1a..096da85a66f 100644
--- a/drivers/net/wireless/rt2x00/rt2x00.h
+++ b/drivers/net/wireless/rt2x00/rt2x00.h
@@ -104,6 +104,12 @@
104#define GET_DURATION_RES(__size, __rate)(((__size) * 8 * 10) % (__rate)) 104#define GET_DURATION_RES(__size, __rate)(((__size) * 8 * 10) % (__rate))
105 105
106/* 106/*
107 * Determine the number of L2 padding bytes required between the header and
108 * the payload.
109 */
110#define L2PAD_SIZE(__hdrlen) (-(__hdrlen) & 3)
111
112/*
107 * Determine the alignment requirement, 113 * Determine the alignment requirement,
108 * to make sure the 802.11 payload is padded to a 4-byte boundrary 114 * to make sure the 802.11 payload is padded to a 4-byte boundrary
109 * we must determine the address of the payload and calculate the 115 * we must determine the address of the payload and calculate the
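
The new L2PAD_SIZE(__hdrlen) macro expands to (-__hdrlen) & 3, i.e. the 0-3 bytes needed to round the 802.11 header length up to the next 4-byte boundary, which is exactly the gap left between header and payload when L2 padding is in use. A quick standalone check of the arithmetic:

/* Standalone check of the L2PAD_SIZE() arithmetic: (-hdrlen) & 3 is the
 * 0..3 byte pad that rounds the header length up to a multiple of 4. */
#include <stdio.h>

#define L2PAD_SIZE(__hdrlen)   (-(__hdrlen) & 3)

int main(void)
{
        int hdrlen;

        for (hdrlen = 24; hdrlen <= 30; hdrlen += 2)
                printf("header %2d bytes -> %d byte(s) of L2 pad\n",
                       hdrlen, L2PAD_SIZE(hdrlen));
        return 0;
}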
@@ -937,25 +943,25 @@ static inline void rt2x00_print_chip(struct rt2x00_dev *rt2x00dev)
937 rt2x00dev->chip.rt, rt2x00dev->chip.rf, rt2x00dev->chip.rev); 943 rt2x00dev->chip.rt, rt2x00dev->chip.rf, rt2x00dev->chip.rev);
938} 944}
939 945
940static inline char rt2x00_rt(const struct rt2x00_chip *chipset, const u16 chip) 946static inline char rt2x00_rt(struct rt2x00_dev *rt2x00dev, const u16 rt)
941{ 947{
942 return (chipset->rt == chip); 948 return (rt2x00dev->chip.rt == rt);
943} 949}
944 950
945static inline char rt2x00_rf(const struct rt2x00_chip *chipset, const u16 chip) 951static inline char rt2x00_rf(struct rt2x00_dev *rt2x00dev, const u16 rf)
946{ 952{
947 return (chipset->rf == chip); 953 return (rt2x00dev->chip.rf == rf);
948} 954}
949 955
950static inline u32 rt2x00_rev(const struct rt2x00_chip *chipset) 956static inline u32 rt2x00_rev(struct rt2x00_dev *rt2x00dev)
951{ 957{
952 return chipset->rev; 958 return rt2x00dev->chip.rev;
953} 959}
954 960
955static inline bool rt2x00_check_rev(const struct rt2x00_chip *chipset, 961static inline bool rt2x00_check_rev(struct rt2x00_dev *rt2x00dev,
956 const u32 mask, const u32 rev) 962 const u32 mask, const u32 rev)
957{ 963{
958 return ((chipset->rev & mask) == rev); 964 return ((rt2x00dev->chip.rev & mask) == rev);
959} 965}
960 966
961static inline void rt2x00_set_chip_intf(struct rt2x00_dev *rt2x00dev, 967static inline void rt2x00_set_chip_intf(struct rt2x00_dev *rt2x00dev,
@@ -964,20 +970,20 @@ static inline void rt2x00_set_chip_intf(struct rt2x00_dev *rt2x00dev,
964 rt2x00dev->chip.intf = intf; 970 rt2x00dev->chip.intf = intf;
965} 971}
966 972
967static inline bool rt2x00_intf(const struct rt2x00_chip *chipset, 973static inline bool rt2x00_intf(struct rt2x00_dev *rt2x00dev,
968 enum rt2x00_chip_intf intf) 974 enum rt2x00_chip_intf intf)
969{ 975{
970 return (chipset->intf == intf); 976 return (rt2x00dev->chip.intf == intf);
971} 977}
972 978
973static inline bool rt2x00_intf_is_pci(struct rt2x00_dev *rt2x00dev) 979static inline bool rt2x00_intf_is_pci(struct rt2x00_dev *rt2x00dev)
974{ 980{
975 return rt2x00_intf(&rt2x00dev->chip, RT2X00_CHIP_INTF_PCI); 981 return rt2x00_intf(rt2x00dev, RT2X00_CHIP_INTF_PCI);
976} 982}
977 983
978static inline bool rt2x00_intf_is_usb(struct rt2x00_dev *rt2x00dev) 984static inline bool rt2x00_intf_is_usb(struct rt2x00_dev *rt2x00dev)
979{ 985{
980 return rt2x00_intf(&rt2x00dev->chip, RT2X00_CHIP_INTF_USB); 986 return rt2x00_intf(rt2x00dev, RT2X00_CHIP_INTF_USB);
981} 987}
982 988
983/** 989/**
@@ -1019,9 +1025,9 @@ int rt2x00mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb);
1019int rt2x00mac_start(struct ieee80211_hw *hw); 1025int rt2x00mac_start(struct ieee80211_hw *hw);
1020void rt2x00mac_stop(struct ieee80211_hw *hw); 1026void rt2x00mac_stop(struct ieee80211_hw *hw);
1021int rt2x00mac_add_interface(struct ieee80211_hw *hw, 1027int rt2x00mac_add_interface(struct ieee80211_hw *hw,
1022 struct ieee80211_if_init_conf *conf); 1028 struct ieee80211_vif *vif);
1023void rt2x00mac_remove_interface(struct ieee80211_hw *hw, 1029void rt2x00mac_remove_interface(struct ieee80211_hw *hw,
1024 struct ieee80211_if_init_conf *conf); 1030 struct ieee80211_vif *vif);
1025int rt2x00mac_config(struct ieee80211_hw *hw, u32 changed); 1031int rt2x00mac_config(struct ieee80211_hw *hw, u32 changed);
1026void rt2x00mac_configure_filter(struct ieee80211_hw *hw, 1032void rt2x00mac_configure_filter(struct ieee80211_hw *hw,
1027 unsigned int changed_flags, 1033 unsigned int changed_flags,
diff --git a/drivers/net/wireless/rt2x00/rt2x00dev.c b/drivers/net/wireless/rt2x00/rt2x00dev.c
index 265e66dba55..b93731b7990 100644
--- a/drivers/net/wireless/rt2x00/rt2x00dev.c
+++ b/drivers/net/wireless/rt2x00/rt2x00dev.c
@@ -385,9 +385,6 @@ void rt2x00lib_rxdone(struct rt2x00_dev *rt2x00dev,
385 memset(&rxdesc, 0, sizeof(rxdesc)); 385 memset(&rxdesc, 0, sizeof(rxdesc));
386 rt2x00dev->ops->lib->fill_rxdone(entry, &rxdesc); 386 rt2x00dev->ops->lib->fill_rxdone(entry, &rxdesc);
387 387
388 /* Trim buffer to correct size */
389 skb_trim(entry->skb, rxdesc.size);
390
391 /* 388 /*
392 * The data behind the ieee80211 header must be 389 * The data behind the ieee80211 header must be
393 * aligned on a 4 byte boundary. 390 * aligned on a 4 byte boundary.
@@ -404,11 +401,16 @@ void rt2x00lib_rxdone(struct rt2x00_dev *rt2x00dev,
404 (rxdesc.flags & RX_FLAG_IV_STRIPPED)) 401 (rxdesc.flags & RX_FLAG_IV_STRIPPED))
405 rt2x00crypto_rx_insert_iv(entry->skb, header_length, 402 rt2x00crypto_rx_insert_iv(entry->skb, header_length,
406 &rxdesc); 403 &rxdesc);
407 else if (rxdesc.dev_flags & RXDONE_L2PAD) 404 else if (header_length &&
405 (rxdesc.size > header_length) &&
406 (rxdesc.dev_flags & RXDONE_L2PAD))
408 rt2x00queue_remove_l2pad(entry->skb, header_length); 407 rt2x00queue_remove_l2pad(entry->skb, header_length);
409 else 408 else
410 rt2x00queue_align_payload(entry->skb, header_length); 409 rt2x00queue_align_payload(entry->skb, header_length);
411 410
411 /* Trim buffer to correct size */
412 skb_trim(entry->skb, rxdesc.size);
413
412 /* 414 /*
413 * Check if the frame was received using HT. In that case, 415 * Check if the frame was received using HT. In that case,
414 * the rate is the MCS index and should be passed to mac80211 416 * the rate is the MCS index and should be passed to mac80211
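
In the rt2x00lib_rxdone() hunk above, skb_trim() moves below the L2-pad handling: rxdesc.size describes the frame without its pad, while the buffer at that point still holds header + pad + payload, so trimming first would clip real payload bytes off the tail. A small arithmetic illustration with hypothetical sizes:

/* Why skb_trim() has to run after the L2 pad is removed (hypothetical sizes). */
#include <stdio.h>

int main(void)
{
        unsigned int header  = 26;    /* 802.11 header length   */
        unsigned int l2pad   = 2;     /* pad to 4-byte boundary */
        unsigned int payload = 100;
        unsigned int rxdesc_size = header + payload;          /* size excludes the pad */

        unsigned int skb_len = header + l2pad + payload;      /* what the buffer holds */

        printf("trim first     : keep %u of %u bytes -> loses %u payload byte(s)\n",
               rxdesc_size, skb_len, skb_len - rxdesc_size);
        printf("strip pad first: %u bytes left, trim to %u is then exact\n",
               skb_len - l2pad, rxdesc_size);
        return 0;
}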
diff --git a/drivers/net/wireless/rt2x00/rt2x00mac.c b/drivers/net/wireless/rt2x00/rt2x00mac.c
index de549c244ed..00f1f939f1b 100644
--- a/drivers/net/wireless/rt2x00/rt2x00mac.c
+++ b/drivers/net/wireless/rt2x00/rt2x00mac.c
@@ -187,10 +187,10 @@ void rt2x00mac_stop(struct ieee80211_hw *hw)
187EXPORT_SYMBOL_GPL(rt2x00mac_stop); 187EXPORT_SYMBOL_GPL(rt2x00mac_stop);
188 188
189int rt2x00mac_add_interface(struct ieee80211_hw *hw, 189int rt2x00mac_add_interface(struct ieee80211_hw *hw,
190 struct ieee80211_if_init_conf *conf) 190 struct ieee80211_vif *vif)
191{ 191{
192 struct rt2x00_dev *rt2x00dev = hw->priv; 192 struct rt2x00_dev *rt2x00dev = hw->priv;
193 struct rt2x00_intf *intf = vif_to_intf(conf->vif); 193 struct rt2x00_intf *intf = vif_to_intf(vif);
194 struct data_queue *queue = rt2x00queue_get_queue(rt2x00dev, QID_BEACON); 194 struct data_queue *queue = rt2x00queue_get_queue(rt2x00dev, QID_BEACON);
195 struct queue_entry *entry = NULL; 195 struct queue_entry *entry = NULL;
196 unsigned int i; 196 unsigned int i;
@@ -203,7 +203,7 @@ int rt2x00mac_add_interface(struct ieee80211_hw *hw,
203 !test_bit(DEVICE_STATE_STARTED, &rt2x00dev->flags)) 203 !test_bit(DEVICE_STATE_STARTED, &rt2x00dev->flags))
204 return -ENODEV; 204 return -ENODEV;
205 205
206 switch (conf->type) { 206 switch (vif->type) {
207 case NL80211_IFTYPE_AP: 207 case NL80211_IFTYPE_AP:
208 /* 208 /*
209 * We don't support mixed combinations of 209 * We don't support mixed combinations of
@@ -263,7 +263,7 @@ int rt2x00mac_add_interface(struct ieee80211_hw *hw,
263 * increase interface count and start initialization. 263 * increase interface count and start initialization.
264 */ 264 */
265 265
266 if (conf->type == NL80211_IFTYPE_AP) 266 if (vif->type == NL80211_IFTYPE_AP)
267 rt2x00dev->intf_ap_count++; 267 rt2x00dev->intf_ap_count++;
268 else 268 else
269 rt2x00dev->intf_sta_count++; 269 rt2x00dev->intf_sta_count++;
@@ -273,16 +273,16 @@ int rt2x00mac_add_interface(struct ieee80211_hw *hw,
273 mutex_init(&intf->beacon_skb_mutex); 273 mutex_init(&intf->beacon_skb_mutex);
274 intf->beacon = entry; 274 intf->beacon = entry;
275 275
276 if (conf->type == NL80211_IFTYPE_AP) 276 if (vif->type == NL80211_IFTYPE_AP)
277 memcpy(&intf->bssid, conf->mac_addr, ETH_ALEN); 277 memcpy(&intf->bssid, vif->addr, ETH_ALEN);
278 memcpy(&intf->mac, conf->mac_addr, ETH_ALEN); 278 memcpy(&intf->mac, vif->addr, ETH_ALEN);
279 279
280 /* 280 /*
281 * The MAC adddress must be configured after the device 281 * The MAC adddress must be configured after the device
282 * has been initialized. Otherwise the device can reset 282 * has been initialized. Otherwise the device can reset
283 * the MAC registers. 283 * the MAC registers.
284 */ 284 */
285 rt2x00lib_config_intf(rt2x00dev, intf, conf->type, intf->mac, NULL); 285 rt2x00lib_config_intf(rt2x00dev, intf, vif->type, intf->mac, NULL);
286 286
287 /* 287 /*
288 * Some filters depend on the current working mode. We can force 288 * Some filters depend on the current working mode. We can force
@@ -296,10 +296,10 @@ int rt2x00mac_add_interface(struct ieee80211_hw *hw,
296EXPORT_SYMBOL_GPL(rt2x00mac_add_interface); 296EXPORT_SYMBOL_GPL(rt2x00mac_add_interface);
297 297
298void rt2x00mac_remove_interface(struct ieee80211_hw *hw, 298void rt2x00mac_remove_interface(struct ieee80211_hw *hw,
299 struct ieee80211_if_init_conf *conf) 299 struct ieee80211_vif *vif)
300{ 300{
301 struct rt2x00_dev *rt2x00dev = hw->priv; 301 struct rt2x00_dev *rt2x00dev = hw->priv;
302 struct rt2x00_intf *intf = vif_to_intf(conf->vif); 302 struct rt2x00_intf *intf = vif_to_intf(vif);
303 303
304 /* 304 /*
305 * Don't allow interfaces to be remove while 305 * Don't allow interfaces to be remove while
@@ -307,11 +307,11 @@ void rt2x00mac_remove_interface(struct ieee80211_hw *hw,
307 * no interface is present. 307 * no interface is present.
308 */ 308 */
309 if (!test_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags) || 309 if (!test_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags) ||
310 (conf->type == NL80211_IFTYPE_AP && !rt2x00dev->intf_ap_count) || 310 (vif->type == NL80211_IFTYPE_AP && !rt2x00dev->intf_ap_count) ||
311 (conf->type != NL80211_IFTYPE_AP && !rt2x00dev->intf_sta_count)) 311 (vif->type != NL80211_IFTYPE_AP && !rt2x00dev->intf_sta_count))
312 return; 312 return;
313 313
314 if (conf->type == NL80211_IFTYPE_AP) 314 if (vif->type == NL80211_IFTYPE_AP)
315 rt2x00dev->intf_ap_count--; 315 rt2x00dev->intf_ap_count--;
316 else 316 else
317 rt2x00dev->intf_sta_count--; 317 rt2x00dev->intf_sta_count--;
diff --git a/drivers/net/wireless/rt2x00/rt2x00pci.c b/drivers/net/wireless/rt2x00/rt2x00pci.c
index 0feb4d0e466..801be436cf1 100644
--- a/drivers/net/wireless/rt2x00/rt2x00pci.c
+++ b/drivers/net/wireless/rt2x00/rt2x00pci.c
@@ -41,6 +41,9 @@ int rt2x00pci_regbusy_read(struct rt2x00_dev *rt2x00dev,
41{ 41{
42 unsigned int i; 42 unsigned int i;
43 43
44 if (!test_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags))
45 return 0;
46
44 for (i = 0; i < REGISTER_BUSY_COUNT; i++) { 47 for (i = 0; i < REGISTER_BUSY_COUNT; i++) {
45 rt2x00pci_register_read(rt2x00dev, offset, reg); 48 rt2x00pci_register_read(rt2x00dev, offset, reg);
46 if (!rt2x00_get_field32(*reg, field)) 49 if (!rt2x00_get_field32(*reg, field))
diff --git a/drivers/net/wireless/rt2x00/rt2x00queue.c b/drivers/net/wireless/rt2x00/rt2x00queue.c
index 9915a09141e..0b4801a1460 100644
--- a/drivers/net/wireless/rt2x00/rt2x00queue.c
+++ b/drivers/net/wireless/rt2x00/rt2x00queue.c
@@ -177,55 +177,45 @@ void rt2x00queue_align_payload(struct sk_buff *skb, unsigned int header_length)
177 177
178void rt2x00queue_insert_l2pad(struct sk_buff *skb, unsigned int header_length) 178void rt2x00queue_insert_l2pad(struct sk_buff *skb, unsigned int header_length)
179{ 179{
180 struct skb_frame_desc *skbdesc = get_skb_frame_desc(skb); 180 unsigned int payload_length = skb->len - header_length;
181 unsigned int frame_length = skb->len;
182 unsigned int header_align = ALIGN_SIZE(skb, 0); 181 unsigned int header_align = ALIGN_SIZE(skb, 0);
183 unsigned int payload_align = ALIGN_SIZE(skb, header_length); 182 unsigned int payload_align = ALIGN_SIZE(skb, header_length);
184 unsigned int l2pad = 4 - (payload_align - header_align); 183 unsigned int l2pad = payload_length ? L2PAD_SIZE(header_length) : 0;
185 184
186 if (header_align == payload_align) { 185 /*
187 /* 186 * Adjust the header alignment if the payload needs to be moved more
188 * Both header and payload must be moved the same 187 * than the header.
189 * amount of bytes to align them properly. This means 188 */
190 * we don't use the L2 padding but just move the entire 189 if (payload_align > header_align)
191 * frame. 190 header_align += 4;
192 */ 191
193 rt2x00queue_align_frame(skb); 192 /* There is nothing to do if no alignment is needed */
194 } else if (!payload_align) { 193 if (!header_align)
195 /* 194 return;
196 * Simple L2 padding, only the header needs to be moved, 195
197 * the payload is already properly aligned. 196 /* Reserve the amount of space needed in front of the frame */
198 */ 197 skb_push(skb, header_align);
199 skb_push(skb, header_align); 198
200 memmove(skb->data, skb->data + header_align, frame_length); 199 /*
201 skbdesc->flags |= SKBDESC_L2_PADDED; 200 * Move the header.
202 } else { 201 */
203 /* 202 memmove(skb->data, skb->data + header_align, header_length);
204 *
205 * Complicated L2 padding, both header and payload need
206 * to be moved. By default we only move to the start
207 * of the buffer, so our header alignment needs to be
208 * increased if there is not enough room for the header
209 * to be moved.
210 */
211 if (payload_align > header_align)
212 header_align += 4;
213 203
214 skb_push(skb, header_align); 204 /* Move the payload, if present and if required */
215 memmove(skb->data, skb->data + header_align, header_length); 205 if (payload_length && payload_align)
216 memmove(skb->data + header_length + l2pad, 206 memmove(skb->data + header_length + l2pad,
217 skb->data + header_length + l2pad + payload_align, 207 skb->data + header_length + l2pad + payload_align,
218 frame_length - header_length); 208 payload_length);
219 skbdesc->flags |= SKBDESC_L2_PADDED; 209
220 } 210 /* Trim the skb to the correct size */
211 skb_trim(skb, header_length + l2pad + payload_length);
221} 212}
222 213
223void rt2x00queue_remove_l2pad(struct sk_buff *skb, unsigned int header_length) 214void rt2x00queue_remove_l2pad(struct sk_buff *skb, unsigned int header_length)
224{ 215{
225 struct skb_frame_desc *skbdesc = get_skb_frame_desc(skb); 216 unsigned int l2pad = L2PAD_SIZE(header_length);
226 unsigned int l2pad = 4 - (header_length & 3);
227 217
228 if (!l2pad || (skbdesc->flags & SKBDESC_L2_PADDED)) 218 if (!l2pad)
229 return; 219 return;
230 220
231 memmove(skb->data + l2pad, skb->data, header_length); 221 memmove(skb->data + l2pad, skb->data, header_length);
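
The rewritten rt2x00queue_insert_l2pad() above derives the pad purely from the header length (L2PAD_SIZE) instead of from the difference of two runtime alignment values, reserves the needed headroom, moves the header, and only touches the payload when one is present and misaligned. A standalone walk-through of the arithmetic, assuming ALIGN_SIZE(skb, off) evaluates to the address of skb->data + off modulo 4 (its definition is outside this hunk):

/* Standalone walk-through of the new insert_l2pad arithmetic; ALIGN_SIZE is
 * assumed to be (address of skb->data + offset) & 3, and the buffer address
 * below is hypothetical. */
#include <stdio.h>

#define L2PAD_SIZE(hdrlen)   (-(hdrlen) & 3)

int main(void)
{
        unsigned long data_addr = 0x1002;   /* hypothetical, not 4-byte aligned */
        unsigned int header_length  = 26;
        unsigned int payload_length = 100;

        unsigned int header_align  = data_addr & 3;
        unsigned int payload_align = (data_addr + header_length) & 3;
        unsigned int l2pad = payload_length ? L2PAD_SIZE(header_length) : 0;

        /* If the payload needs to move further than the header, push the
         * header one extra word so both land on 4-byte boundaries. */
        if (payload_align > header_align)
                header_align += 4;

        printf("header shifts %u byte(s) toward the buffer front, %u byte(s) of L2 pad\n",
               header_align, l2pad);
        printf("final frame length = %u\n", header_length + l2pad + payload_length);
        return 0;
}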
@@ -346,7 +336,9 @@ static void rt2x00queue_create_tx_descriptor(struct queue_entry *entry,
346 * Header and alignment information. 336 * Header and alignment information.
347 */ 337 */
348 txdesc->header_length = ieee80211_get_hdrlen_from_skb(entry->skb); 338 txdesc->header_length = ieee80211_get_hdrlen_from_skb(entry->skb);
349 txdesc->l2pad = ALIGN_SIZE(entry->skb, txdesc->header_length); 339 if (test_bit(DRIVER_REQUIRE_L2PAD, &rt2x00dev->flags) &&
340 (entry->skb->len > txdesc->header_length))
341 txdesc->l2pad = L2PAD_SIZE(txdesc->header_length);
350 342
351 /* 343 /*
352 * Check whether this frame is to be acked. 344 * Check whether this frame is to be acked.
@@ -387,10 +379,13 @@ static void rt2x00queue_create_tx_descriptor(struct queue_entry *entry,
387 379
388 /* 380 /*
389 * Beacons and probe responses require the tsf timestamp 381 * Beacons and probe responses require the tsf timestamp
390 * to be inserted into the frame. 382 * to be inserted into the frame, except for a frame that has been injected
383 * through a monitor interface. This latter is needed for testing a
384 * monitor interface.
391 */ 385 */
392 if (ieee80211_is_beacon(hdr->frame_control) || 386 if ((ieee80211_is_beacon(hdr->frame_control) ||
393 ieee80211_is_probe_resp(hdr->frame_control)) 387 ieee80211_is_probe_resp(hdr->frame_control)) &&
388 (!(tx_info->flags & IEEE80211_TX_CTL_INJECTED)))
394 __set_bit(ENTRY_TXD_REQ_TIMESTAMP, &txdesc->flags); 389 __set_bit(ENTRY_TXD_REQ_TIMESTAMP, &txdesc->flags);
395 390
396 /* 391 /*
diff --git a/drivers/net/wireless/rt2x00/rt2x00queue.h b/drivers/net/wireless/rt2x00/rt2x00queue.h
index 70775e5ba1a..c1e482bb37b 100644
--- a/drivers/net/wireless/rt2x00/rt2x00queue.h
+++ b/drivers/net/wireless/rt2x00/rt2x00queue.h
@@ -92,8 +92,6 @@ enum data_queue_qid {
92 * @SKBDESC_DMA_MAPPED_TX: &skb_dma field has been mapped for TX 92 * @SKBDESC_DMA_MAPPED_TX: &skb_dma field has been mapped for TX
93 * @SKBDESC_IV_STRIPPED: Frame contained a IV/EIV provided by 93 * @SKBDESC_IV_STRIPPED: Frame contained a IV/EIV provided by
94 * mac80211 but was stripped for processing by the driver. 94 * mac80211 but was stripped for processing by the driver.
95 * @SKBDESC_L2_PADDED: Payload has been padded for 4-byte alignment,
96 * the padded bytes are located between header and payload.
97 * @SKBDESC_NOT_MAC80211: Frame didn't originate from mac80211, 95 * @SKBDESC_NOT_MAC80211: Frame didn't originate from mac80211,
98 * don't try to pass it back. 96 * don't try to pass it back.
99 */ 97 */
@@ -101,8 +99,7 @@ enum skb_frame_desc_flags {
101 SKBDESC_DMA_MAPPED_RX = 1 << 0, 99 SKBDESC_DMA_MAPPED_RX = 1 << 0,
102 SKBDESC_DMA_MAPPED_TX = 1 << 1, 100 SKBDESC_DMA_MAPPED_TX = 1 << 1,
103 SKBDESC_IV_STRIPPED = 1 << 2, 101 SKBDESC_IV_STRIPPED = 1 << 2,
104 SKBDESC_L2_PADDED = 1 << 3, 102 SKBDESC_NOT_MAC80211 = 1 << 3,
105 SKBDESC_NOT_MAC80211 = 1 << 4,
106}; 103};
107 104
108/** 105/**
diff --git a/drivers/net/wireless/rt2x00/rt61pci.c b/drivers/net/wireless/rt2x00/rt61pci.c
index 0ca589306d7..1f97a797bc4 100644
--- a/drivers/net/wireless/rt2x00/rt61pci.c
+++ b/drivers/net/wireless/rt2x00/rt61pci.c
@@ -637,8 +637,7 @@ static void rt61pci_config_antenna_5x(struct rt2x00_dev *rt2x00dev,
637 rt61pci_bbp_read(rt2x00dev, 4, &r4); 637 rt61pci_bbp_read(rt2x00dev, 4, &r4);
638 rt61pci_bbp_read(rt2x00dev, 77, &r77); 638 rt61pci_bbp_read(rt2x00dev, 77, &r77);
639 639
640 rt2x00_set_field8(&r3, BBP_R3_SMART_MODE, 640 rt2x00_set_field8(&r3, BBP_R3_SMART_MODE, rt2x00_rf(rt2x00dev, RF5325));
641 rt2x00_rf(&rt2x00dev->chip, RF5325));
642 641
643 /* 642 /*
644 * Configure the RX antenna. 643 * Configure the RX antenna.
@@ -684,8 +683,7 @@ static void rt61pci_config_antenna_2x(struct rt2x00_dev *rt2x00dev,
684 rt61pci_bbp_read(rt2x00dev, 4, &r4); 683 rt61pci_bbp_read(rt2x00dev, 4, &r4);
685 rt61pci_bbp_read(rt2x00dev, 77, &r77); 684 rt61pci_bbp_read(rt2x00dev, 77, &r77);
686 685
687 rt2x00_set_field8(&r3, BBP_R3_SMART_MODE, 686 rt2x00_set_field8(&r3, BBP_R3_SMART_MODE, rt2x00_rf(rt2x00dev, RF2529));
688 rt2x00_rf(&rt2x00dev->chip, RF2529));
689 rt2x00_set_field8(&r4, BBP_R4_RX_FRAME_END, 687 rt2x00_set_field8(&r4, BBP_R4_RX_FRAME_END,
690 !test_bit(CONFIG_FRAME_TYPE, &rt2x00dev->flags)); 688 !test_bit(CONFIG_FRAME_TYPE, &rt2x00dev->flags));
691 689
@@ -833,12 +831,11 @@ static void rt61pci_config_ant(struct rt2x00_dev *rt2x00dev,
833 831
834 rt2x00pci_register_write(rt2x00dev, PHY_CSR0, reg); 832 rt2x00pci_register_write(rt2x00dev, PHY_CSR0, reg);
835 833
836 if (rt2x00_rf(&rt2x00dev->chip, RF5225) || 834 if (rt2x00_rf(rt2x00dev, RF5225) || rt2x00_rf(rt2x00dev, RF5325))
837 rt2x00_rf(&rt2x00dev->chip, RF5325))
838 rt61pci_config_antenna_5x(rt2x00dev, ant); 835 rt61pci_config_antenna_5x(rt2x00dev, ant);
839 else if (rt2x00_rf(&rt2x00dev->chip, RF2527)) 836 else if (rt2x00_rf(rt2x00dev, RF2527))
840 rt61pci_config_antenna_2x(rt2x00dev, ant); 837 rt61pci_config_antenna_2x(rt2x00dev, ant);
841 else if (rt2x00_rf(&rt2x00dev->chip, RF2529)) { 838 else if (rt2x00_rf(rt2x00dev, RF2529)) {
842 if (test_bit(CONFIG_DOUBLE_ANTENNA, &rt2x00dev->flags)) 839 if (test_bit(CONFIG_DOUBLE_ANTENNA, &rt2x00dev->flags))
843 rt61pci_config_antenna_2x(rt2x00dev, ant); 840 rt61pci_config_antenna_2x(rt2x00dev, ant);
844 else 841 else
@@ -879,8 +876,7 @@ static void rt61pci_config_channel(struct rt2x00_dev *rt2x00dev,
879 rt2x00_set_field32(&rf->rf3, RF3_TXPOWER, TXPOWER_TO_DEV(txpower)); 876 rt2x00_set_field32(&rf->rf3, RF3_TXPOWER, TXPOWER_TO_DEV(txpower));
880 rt2x00_set_field32(&rf->rf4, RF4_FREQ_OFFSET, rt2x00dev->freq_offset); 877 rt2x00_set_field32(&rf->rf4, RF4_FREQ_OFFSET, rt2x00dev->freq_offset);
881 878
882 smart = !(rt2x00_rf(&rt2x00dev->chip, RF5225) || 879 smart = !(rt2x00_rf(rt2x00dev, RF5225) || rt2x00_rf(rt2x00dev, RF2527));
883 rt2x00_rf(&rt2x00dev->chip, RF2527));
884 880
885 rt61pci_bbp_read(rt2x00dev, 3, &r3); 881 rt61pci_bbp_read(rt2x00dev, 3, &r3);
886 rt2x00_set_field8(&r3, BBP_R3_SMART_MODE, smart); 882 rt2x00_set_field8(&r3, BBP_R3_SMART_MODE, smart);
@@ -2302,10 +2298,10 @@ static int rt61pci_init_eeprom(struct rt2x00_dev *rt2x00dev)
2302 rt2x00_set_chip_rf(rt2x00dev, value, reg); 2298 rt2x00_set_chip_rf(rt2x00dev, value, reg);
2303 rt2x00_print_chip(rt2x00dev); 2299 rt2x00_print_chip(rt2x00dev);
2304 2300
2305 if (!rt2x00_rf(&rt2x00dev->chip, RF5225) && 2301 if (!rt2x00_rf(rt2x00dev, RF5225) &&
2306 !rt2x00_rf(&rt2x00dev->chip, RF5325) && 2302 !rt2x00_rf(rt2x00dev, RF5325) &&
2307 !rt2x00_rf(&rt2x00dev->chip, RF2527) && 2303 !rt2x00_rf(rt2x00dev, RF2527) &&
2308 !rt2x00_rf(&rt2x00dev->chip, RF2529)) { 2304 !rt2x00_rf(rt2x00dev, RF2529)) {
2309 ERROR(rt2x00dev, "Invalid RF chipset detected.\n"); 2305 ERROR(rt2x00dev, "Invalid RF chipset detected.\n");
2310 return -ENODEV; 2306 return -ENODEV;
2311 } 2307 }
@@ -2360,7 +2356,7 @@ static int rt61pci_init_eeprom(struct rt2x00_dev *rt2x00dev)
2360 * the antenna settings should be gathered from the NIC 2356 * the antenna settings should be gathered from the NIC
2361 * eeprom word. 2357 * eeprom word.
2362 */ 2358 */
2363 if (rt2x00_rf(&rt2x00dev->chip, RF2529) && 2359 if (rt2x00_rf(rt2x00dev, RF2529) &&
2364 !test_bit(CONFIG_DOUBLE_ANTENNA, &rt2x00dev->flags)) { 2360 !test_bit(CONFIG_DOUBLE_ANTENNA, &rt2x00dev->flags)) {
2365 rt2x00dev->default_ant.rx = 2361 rt2x00dev->default_ant.rx =
2366 ANTENNA_A + rt2x00_get_field16(eeprom, EEPROM_NIC_RX_FIXED); 2362 ANTENNA_A + rt2x00_get_field16(eeprom, EEPROM_NIC_RX_FIXED);
@@ -2571,8 +2567,7 @@ static int rt61pci_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
2571 spec->channels = rf_vals_seq; 2567 spec->channels = rf_vals_seq;
2572 } 2568 }
2573 2569
2574 if (rt2x00_rf(&rt2x00dev->chip, RF5225) || 2570 if (rt2x00_rf(rt2x00dev, RF5225) || rt2x00_rf(rt2x00dev, RF5325)) {
2575 rt2x00_rf(&rt2x00dev->chip, RF5325)) {
2576 spec->supported_bands |= SUPPORT_BAND_5GHZ; 2571 spec->supported_bands |= SUPPORT_BAND_5GHZ;
2577 spec->num_channels = ARRAY_SIZE(rf_vals_seq); 2572 spec->num_channels = ARRAY_SIZE(rf_vals_seq);
2578 } 2573 }
@@ -2812,7 +2807,7 @@ static const struct rt2x00_ops rt61pci_ops = {
2812/* 2807/*
2813 * RT61pci module information. 2808 * RT61pci module information.
2814 */ 2809 */
2815static struct pci_device_id rt61pci_device_table[] = { 2810static DEFINE_PCI_DEVICE_TABLE(rt61pci_device_table) = {
2816 /* RT2561s */ 2811 /* RT2561s */
2817 { PCI_DEVICE(0x1814, 0x0301), PCI_DEVICE_DATA(&rt61pci_ops) }, 2812 { PCI_DEVICE(0x1814, 0x0301), PCI_DEVICE_DATA(&rt61pci_ops) },
2818 /* RT2561 v2 */ 2813 /* RT2561 v2 */
diff --git a/drivers/net/wireless/rt2x00/rt73usb.c b/drivers/net/wireless/rt2x00/rt73usb.c
index ced3b6ab5e1..a0269129439 100644
--- a/drivers/net/wireless/rt2x00/rt73usb.c
+++ b/drivers/net/wireless/rt2x00/rt73usb.c
@@ -136,8 +136,8 @@ static void rt73usb_rf_write(struct rt2x00_dev *rt2x00dev,
136 * all others contain 20 bits. 136 * all others contain 20 bits.
137 */ 137 */
138 rt2x00_set_field32(&reg, PHY_CSR4_NUMBER_OF_BITS, 138 rt2x00_set_field32(&reg, PHY_CSR4_NUMBER_OF_BITS,
139 20 + (rt2x00_rf(&rt2x00dev->chip, RF5225) || 139 20 + (rt2x00_rf(rt2x00dev, RF5225) ||
140 rt2x00_rf(&rt2x00dev->chip, RF2527))); 140 rt2x00_rf(rt2x00dev, RF2527)));
141 rt2x00_set_field32(&reg, PHY_CSR4_IF_SELECT, 0); 141 rt2x00_set_field32(&reg, PHY_CSR4_IF_SELECT, 0);
142 rt2x00_set_field32(&reg, PHY_CSR4_BUSY, 1); 142 rt2x00_set_field32(&reg, PHY_CSR4_BUSY, 1);
143 143
@@ -741,11 +741,9 @@ static void rt73usb_config_ant(struct rt2x00_dev *rt2x00dev,
741 741
742 rt2x00usb_register_write(rt2x00dev, PHY_CSR0, reg); 742 rt2x00usb_register_write(rt2x00dev, PHY_CSR0, reg);
743 743
744 if (rt2x00_rf(&rt2x00dev->chip, RF5226) || 744 if (rt2x00_rf(rt2x00dev, RF5226) || rt2x00_rf(rt2x00dev, RF5225))
745 rt2x00_rf(&rt2x00dev->chip, RF5225))
746 rt73usb_config_antenna_5x(rt2x00dev, ant); 745 rt73usb_config_antenna_5x(rt2x00dev, ant);
747 else if (rt2x00_rf(&rt2x00dev->chip, RF2528) || 746 else if (rt2x00_rf(rt2x00dev, RF2528) || rt2x00_rf(rt2x00dev, RF2527))
748 rt2x00_rf(&rt2x00dev->chip, RF2527))
749 rt73usb_config_antenna_2x(rt2x00dev, ant); 747 rt73usb_config_antenna_2x(rt2x00dev, ant);
750} 748}
751 749
@@ -779,8 +777,7 @@ static void rt73usb_config_channel(struct rt2x00_dev *rt2x00dev,
779 rt2x00_set_field32(&rf->rf3, RF3_TXPOWER, TXPOWER_TO_DEV(txpower)); 777 rt2x00_set_field32(&rf->rf3, RF3_TXPOWER, TXPOWER_TO_DEV(txpower));
780 rt2x00_set_field32(&rf->rf4, RF4_FREQ_OFFSET, rt2x00dev->freq_offset); 778 rt2x00_set_field32(&rf->rf4, RF4_FREQ_OFFSET, rt2x00dev->freq_offset);
781 779
782 smart = !(rt2x00_rf(&rt2x00dev->chip, RF5225) || 780 smart = !(rt2x00_rf(rt2x00dev, RF5225) || rt2x00_rf(rt2x00dev, RF2527));
783 rt2x00_rf(&rt2x00dev->chip, RF2527));
784 781
785 rt73usb_bbp_read(rt2x00dev, 3, &r3); 782 rt73usb_bbp_read(rt2x00dev, 3, &r3);
786 rt2x00_set_field8(&r3, BBP_R3_SMART_MODE, smart); 783 rt2x00_set_field8(&r3, BBP_R3_SMART_MODE, smart);
@@ -1210,8 +1207,7 @@ static int rt73usb_init_registers(struct rt2x00_dev *rt2x00dev)
1210 rt2x00usb_register_write(rt2x00dev, SEC_CSR5, 0x00000000); 1207 rt2x00usb_register_write(rt2x00dev, SEC_CSR5, 0x00000000);
1211 1208
1212 reg = 0x000023b0; 1209 reg = 0x000023b0;
1213 if (rt2x00_rf(&rt2x00dev->chip, RF5225) || 1210 if (rt2x00_rf(rt2x00dev, RF5225) || rt2x00_rf(rt2x00dev, RF2527))
1214 rt2x00_rf(&rt2x00dev->chip, RF2527))
1215 rt2x00_set_field32(&reg, PHY_CSR1_RF_RPI, 1); 1211 rt2x00_set_field32(&reg, PHY_CSR1_RF_RPI, 1);
1216 rt2x00usb_register_write(rt2x00dev, PHY_CSR1, reg); 1212 rt2x00usb_register_write(rt2x00dev, PHY_CSR1, reg);
1217 1213
@@ -1827,16 +1823,16 @@ static int rt73usb_init_eeprom(struct rt2x00_dev *rt2x00dev)
1827 rt2x00_set_chip(rt2x00dev, RT2571, value, reg); 1823 rt2x00_set_chip(rt2x00dev, RT2571, value, reg);
1828 rt2x00_print_chip(rt2x00dev); 1824 rt2x00_print_chip(rt2x00dev);
1829 1825
1830 if (!rt2x00_check_rev(&rt2x00dev->chip, 0x000ffff0, 0x25730) || 1826 if (!rt2x00_check_rev(rt2x00dev, 0x000ffff0, 0x25730) ||
1831 rt2x00_check_rev(&rt2x00dev->chip, 0x0000000f, 0)) { 1827 rt2x00_check_rev(rt2x00dev, 0x0000000f, 0)) {
1832 ERROR(rt2x00dev, "Invalid RT chipset detected.\n"); 1828 ERROR(rt2x00dev, "Invalid RT chipset detected.\n");
1833 return -ENODEV; 1829 return -ENODEV;
1834 } 1830 }
1835 1831
1836 if (!rt2x00_rf(&rt2x00dev->chip, RF5226) && 1832 if (!rt2x00_rf(rt2x00dev, RF5226) &&
1837 !rt2x00_rf(&rt2x00dev->chip, RF2528) && 1833 !rt2x00_rf(rt2x00dev, RF2528) &&
1838 !rt2x00_rf(&rt2x00dev->chip, RF5225) && 1834 !rt2x00_rf(rt2x00dev, RF5225) &&
1839 !rt2x00_rf(&rt2x00dev->chip, RF2527)) { 1835 !rt2x00_rf(rt2x00dev, RF2527)) {
1840 ERROR(rt2x00dev, "Invalid RF chipset detected.\n"); 1836 ERROR(rt2x00dev, "Invalid RF chipset detected.\n");
1841 return -ENODEV; 1837 return -ENODEV;
1842 } 1838 }
@@ -2081,17 +2077,17 @@ static int rt73usb_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
2081 spec->supported_bands = SUPPORT_BAND_2GHZ; 2077 spec->supported_bands = SUPPORT_BAND_2GHZ;
2082 spec->supported_rates = SUPPORT_RATE_CCK | SUPPORT_RATE_OFDM; 2078 spec->supported_rates = SUPPORT_RATE_CCK | SUPPORT_RATE_OFDM;
2083 2079
2084 if (rt2x00_rf(&rt2x00dev->chip, RF2528)) { 2080 if (rt2x00_rf(rt2x00dev, RF2528)) {
2085 spec->num_channels = ARRAY_SIZE(rf_vals_bg_2528); 2081 spec->num_channels = ARRAY_SIZE(rf_vals_bg_2528);
2086 spec->channels = rf_vals_bg_2528; 2082 spec->channels = rf_vals_bg_2528;
2087 } else if (rt2x00_rf(&rt2x00dev->chip, RF5226)) { 2083 } else if (rt2x00_rf(rt2x00dev, RF5226)) {
2088 spec->supported_bands |= SUPPORT_BAND_5GHZ; 2084 spec->supported_bands |= SUPPORT_BAND_5GHZ;
2089 spec->num_channels = ARRAY_SIZE(rf_vals_5226); 2085 spec->num_channels = ARRAY_SIZE(rf_vals_5226);
2090 spec->channels = rf_vals_5226; 2086 spec->channels = rf_vals_5226;
2091 } else if (rt2x00_rf(&rt2x00dev->chip, RF2527)) { 2087 } else if (rt2x00_rf(rt2x00dev, RF2527)) {
2092 spec->num_channels = 14; 2088 spec->num_channels = 14;
2093 spec->channels = rf_vals_5225_2527; 2089 spec->channels = rf_vals_5225_2527;
2094 } else if (rt2x00_rf(&rt2x00dev->chip, RF5225)) { 2090 } else if (rt2x00_rf(rt2x00dev, RF5225)) {
2095 spec->supported_bands |= SUPPORT_BAND_5GHZ; 2091 spec->supported_bands |= SUPPORT_BAND_5GHZ;
2096 spec->num_channels = ARRAY_SIZE(rf_vals_5225_2527); 2092 spec->num_channels = ARRAY_SIZE(rf_vals_5225_2527);
2097 spec->channels = rf_vals_5225_2527; 2093 spec->channels = rf_vals_5225_2527;
@@ -2354,6 +2350,7 @@ static struct usb_device_id rt73usb_device_table[] = {
2354 { USB_DEVICE(0x08dd, 0x0120), USB_DEVICE_DATA(&rt73usb_ops) }, 2350 { USB_DEVICE(0x08dd, 0x0120), USB_DEVICE_DATA(&rt73usb_ops) },
2355 /* Buffalo */ 2351 /* Buffalo */
2356 { USB_DEVICE(0x0411, 0x00d8), USB_DEVICE_DATA(&rt73usb_ops) }, 2352 { USB_DEVICE(0x0411, 0x00d8), USB_DEVICE_DATA(&rt73usb_ops) },
2353 { USB_DEVICE(0x0411, 0x00d9), USB_DEVICE_DATA(&rt73usb_ops) },
2357 { USB_DEVICE(0x0411, 0x00f4), USB_DEVICE_DATA(&rt73usb_ops) }, 2354 { USB_DEVICE(0x0411, 0x00f4), USB_DEVICE_DATA(&rt73usb_ops) },
2358 { USB_DEVICE(0x0411, 0x0116), USB_DEVICE_DATA(&rt73usb_ops) }, 2355 { USB_DEVICE(0x0411, 0x0116), USB_DEVICE_DATA(&rt73usb_ops) },
2359 { USB_DEVICE(0x0411, 0x0119), USB_DEVICE_DATA(&rt73usb_ops) }, 2356 { USB_DEVICE(0x0411, 0x0119), USB_DEVICE_DATA(&rt73usb_ops) },
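
Note: across the rt61pci and rt73usb hunks, the chip and RF checks now take the device itself instead of the &rt2x00dev->chip pointer. A minimal sketch of what the updated helpers presumably look like (they live in rt2x00.h, outside this diff, so the field names are assumptions):

/* Sketch only: approximations of the rt2x00.h helpers used above. */
static inline bool rt2x00_rf(struct rt2x00_dev *rt2x00dev, const u16 rf)
{
        return (rt2x00dev->chip.rf == rf);
}

static inline bool rt2x00_check_rev(struct rt2x00_dev *rt2x00dev,
                                    const u32 mask, const u32 rev)
{
        return ((rt2x00dev->chip.rev & mask) == rev);
}
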
diff --git a/drivers/net/wireless/rtl818x/rtl8180.h b/drivers/net/wireless/rtl818x/rtl8180.h
index 8721282a818..de3844fe06d 100644
--- a/drivers/net/wireless/rtl818x/rtl8180.h
+++ b/drivers/net/wireless/rtl818x/rtl8180.h
@@ -60,7 +60,6 @@ struct rtl8180_priv {
60 struct rtl818x_csr __iomem *map; 60 struct rtl818x_csr __iomem *map;
61 const struct rtl818x_rf_ops *rf; 61 const struct rtl818x_rf_ops *rf;
62 struct ieee80211_vif *vif; 62 struct ieee80211_vif *vif;
63 int mode;
64 63
65 /* rtl8180 driver specific */ 64 /* rtl8180 driver specific */
66 spinlock_t lock; 65 spinlock_t lock;
diff --git a/drivers/net/wireless/rtl818x/rtl8180_dev.c b/drivers/net/wireless/rtl818x/rtl8180_dev.c
index 8a40a143998..b9192bfcc55 100644
--- a/drivers/net/wireless/rtl818x/rtl8180_dev.c
+++ b/drivers/net/wireless/rtl818x/rtl8180_dev.c
@@ -33,7 +33,7 @@ MODULE_AUTHOR("Andrea Merello <andreamrl@tiscali.it>");
33MODULE_DESCRIPTION("RTL8180 / RTL8185 PCI wireless driver"); 33MODULE_DESCRIPTION("RTL8180 / RTL8185 PCI wireless driver");
34MODULE_LICENSE("GPL"); 34MODULE_LICENSE("GPL");
35 35
36static struct pci_device_id rtl8180_table[] __devinitdata = { 36static DEFINE_PCI_DEVICE_TABLE(rtl8180_table) = {
37 /* rtl8185 */ 37 /* rtl8185 */
38 { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8185) }, 38 { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8185) },
39 { PCI_DEVICE(PCI_VENDOR_ID_BELKIN, 0x700f) }, 39 { PCI_DEVICE(PCI_VENDOR_ID_BELKIN, 0x700f) },
@@ -82,8 +82,6 @@ static const struct ieee80211_channel rtl818x_channels[] = {
82}; 82};
83 83
84 84
85
86
87void rtl8180_write_phy(struct ieee80211_hw *dev, u8 addr, u32 data) 85void rtl8180_write_phy(struct ieee80211_hw *dev, u8 addr, u32 data)
88{ 86{
89 struct rtl8180_priv *priv = dev->priv; 87 struct rtl8180_priv *priv = dev->priv;
@@ -615,7 +613,6 @@ static int rtl8180_start(struct ieee80211_hw *dev)
615 reg |= RTL818X_CMD_TX_ENABLE; 613 reg |= RTL818X_CMD_TX_ENABLE;
616 rtl818x_iowrite8(priv, &priv->map->CMD, reg); 614 rtl818x_iowrite8(priv, &priv->map->CMD, reg);
617 615
618 priv->mode = NL80211_IFTYPE_MONITOR;
619 return 0; 616 return 0;
620 617
621 err_free_rings: 618 err_free_rings:
@@ -633,8 +630,6 @@ static void rtl8180_stop(struct ieee80211_hw *dev)
633 u8 reg; 630 u8 reg;
634 int i; 631 int i;
635 632
636 priv->mode = NL80211_IFTYPE_UNSPECIFIED;
637
638 rtl818x_iowrite16(priv, &priv->map->INT_MASK, 0); 633 rtl818x_iowrite16(priv, &priv->map->INT_MASK, 0);
639 634
640 reg = rtl818x_ioread8(priv, &priv->map->CMD); 635 reg = rtl818x_ioread8(priv, &priv->map->CMD);
@@ -657,38 +652,39 @@ static void rtl8180_stop(struct ieee80211_hw *dev)
657} 652}
658 653
659static int rtl8180_add_interface(struct ieee80211_hw *dev, 654static int rtl8180_add_interface(struct ieee80211_hw *dev,
660 struct ieee80211_if_init_conf *conf) 655 struct ieee80211_vif *vif)
661{ 656{
662 struct rtl8180_priv *priv = dev->priv; 657 struct rtl8180_priv *priv = dev->priv;
663 658
664 if (priv->mode != NL80211_IFTYPE_MONITOR) 659 /*
665 return -EOPNOTSUPP; 660 * We only support one active interface at a time.
661 */
662 if (priv->vif)
663 return -EBUSY;
666 664
667 switch (conf->type) { 665 switch (vif->type) {
668 case NL80211_IFTYPE_STATION: 666 case NL80211_IFTYPE_STATION:
669 priv->mode = conf->type;
670 break; 667 break;
671 default: 668 default:
672 return -EOPNOTSUPP; 669 return -EOPNOTSUPP;
673 } 670 }
674 671
675 priv->vif = conf->vif; 672 priv->vif = vif;
676 673
677 rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD, RTL818X_EEPROM_CMD_CONFIG); 674 rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD, RTL818X_EEPROM_CMD_CONFIG);
678 rtl818x_iowrite32(priv, (__le32 __iomem *)&priv->map->MAC[0], 675 rtl818x_iowrite32(priv, (__le32 __iomem *)&priv->map->MAC[0],
679 le32_to_cpu(*(__le32 *)conf->mac_addr)); 676 le32_to_cpu(*(__le32 *)vif->addr));
680 rtl818x_iowrite16(priv, (__le16 __iomem *)&priv->map->MAC[4], 677 rtl818x_iowrite16(priv, (__le16 __iomem *)&priv->map->MAC[4],
681 le16_to_cpu(*(__le16 *)(conf->mac_addr + 4))); 678 le16_to_cpu(*(__le16 *)(vif->addr + 4)));
682 rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD, RTL818X_EEPROM_CMD_NORMAL); 679 rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD, RTL818X_EEPROM_CMD_NORMAL);
683 680
684 return 0; 681 return 0;
685} 682}
686 683
687static void rtl8180_remove_interface(struct ieee80211_hw *dev, 684static void rtl8180_remove_interface(struct ieee80211_hw *dev,
688 struct ieee80211_if_init_conf *conf) 685 struct ieee80211_vif *vif)
689{ 686{
690 struct rtl8180_priv *priv = dev->priv; 687 struct rtl8180_priv *priv = dev->priv;
691 priv->mode = NL80211_IFTYPE_MONITOR;
692 priv->vif = NULL; 688 priv->vif = NULL;
693} 689}
694 690
diff --git a/drivers/net/wireless/rtl818x/rtl8187.h b/drivers/net/wireless/rtl818x/rtl8187.h
index 6af0f3f71f3..6bb32112e65 100644
--- a/drivers/net/wireless/rtl818x/rtl8187.h
+++ b/drivers/net/wireless/rtl818x/rtl8187.h
@@ -92,7 +92,7 @@ struct rtl8187_priv {
92 struct rtl818x_csr *map; 92 struct rtl818x_csr *map;
93 const struct rtl818x_rf_ops *rf; 93 const struct rtl818x_rf_ops *rf;
94 struct ieee80211_vif *vif; 94 struct ieee80211_vif *vif;
95 int mode; 95
96 /* The mutex protects the TX loopback state. 96 /* The mutex protects the TX loopback state.
97 * Any attempt to set channels concurrently locks the device. 97 * Any attempt to set channels concurrently locks the device.
98 */ 98 */
diff --git a/drivers/net/wireless/rtl818x/rtl8187_dev.c b/drivers/net/wireless/rtl818x/rtl8187_dev.c
index bc5726dd5fe..f336c63053c 100644
--- a/drivers/net/wireless/rtl818x/rtl8187_dev.c
+++ b/drivers/net/wireless/rtl818x/rtl8187_dev.c
@@ -1018,31 +1018,30 @@ static void rtl8187_stop(struct ieee80211_hw *dev)
1018} 1018}
1019 1019
1020static int rtl8187_add_interface(struct ieee80211_hw *dev, 1020static int rtl8187_add_interface(struct ieee80211_hw *dev,
1021 struct ieee80211_if_init_conf *conf) 1021 struct ieee80211_vif *vif)
1022{ 1022{
1023 struct rtl8187_priv *priv = dev->priv; 1023 struct rtl8187_priv *priv = dev->priv;
1024 int i; 1024 int i;
1025 int ret = -EOPNOTSUPP; 1025 int ret = -EOPNOTSUPP;
1026 1026
1027 mutex_lock(&priv->conf_mutex); 1027 mutex_lock(&priv->conf_mutex);
1028 if (priv->mode != NL80211_IFTYPE_MONITOR) 1028 if (priv->vif)
1029 goto exit; 1029 goto exit;
1030 1030
1031 switch (conf->type) { 1031 switch (vif->type) {
1032 case NL80211_IFTYPE_STATION: 1032 case NL80211_IFTYPE_STATION:
1033 priv->mode = conf->type;
1034 break; 1033 break;
1035 default: 1034 default:
1036 goto exit; 1035 goto exit;
1037 } 1036 }
1038 1037
1039 ret = 0; 1038 ret = 0;
1040 priv->vif = conf->vif; 1039 priv->vif = vif;
1041 1040
1042 rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD, RTL818X_EEPROM_CMD_CONFIG); 1041 rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD, RTL818X_EEPROM_CMD_CONFIG);
1043 for (i = 0; i < ETH_ALEN; i++) 1042 for (i = 0; i < ETH_ALEN; i++)
1044 rtl818x_iowrite8(priv, &priv->map->MAC[i], 1043 rtl818x_iowrite8(priv, &priv->map->MAC[i],
1045 ((u8 *)conf->mac_addr)[i]); 1044 ((u8 *)vif->addr)[i]);
1046 rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD, RTL818X_EEPROM_CMD_NORMAL); 1045 rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD, RTL818X_EEPROM_CMD_NORMAL);
1047 1046
1048exit: 1047exit:
@@ -1051,11 +1050,10 @@ exit:
1051} 1050}
1052 1051
1053static void rtl8187_remove_interface(struct ieee80211_hw *dev, 1052static void rtl8187_remove_interface(struct ieee80211_hw *dev,
1054 struct ieee80211_if_init_conf *conf) 1053 struct ieee80211_vif *vif)
1055{ 1054{
1056 struct rtl8187_priv *priv = dev->priv; 1055 struct rtl8187_priv *priv = dev->priv;
1057 mutex_lock(&priv->conf_mutex); 1056 mutex_lock(&priv->conf_mutex);
1058 priv->mode = NL80211_IFTYPE_MONITOR;
1059 priv->vif = NULL; 1057 priv->vif = NULL;
1060 mutex_unlock(&priv->conf_mutex); 1058 mutex_unlock(&priv->conf_mutex);
1061} 1059}
@@ -1365,7 +1363,6 @@ static int __devinit rtl8187_probe(struct usb_interface *intf,
1365 dev->wiphy->bands[IEEE80211_BAND_2GHZ] = &priv->band; 1363 dev->wiphy->bands[IEEE80211_BAND_2GHZ] = &priv->band;
1366 1364
1367 1365
1368 priv->mode = NL80211_IFTYPE_MONITOR;
1369 dev->flags = IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING | 1366 dev->flags = IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING |
1370 IEEE80211_HW_SIGNAL_DBM | 1367 IEEE80211_HW_SIGNAL_DBM |
1371 IEEE80211_HW_RX_INCLUDES_FCS; 1368 IEEE80211_HW_RX_INCLUDES_FCS;
diff --git a/drivers/net/wireless/rtl818x/rtl8187_leds.c b/drivers/net/wireless/rtl818x/rtl8187_leds.c
index ded44c045eb..f82aa8b4bdd 100644
--- a/drivers/net/wireless/rtl818x/rtl8187_leds.c
+++ b/drivers/net/wireless/rtl818x/rtl8187_leds.c
@@ -33,7 +33,7 @@ static void led_turn_on(struct work_struct *work)
33 struct rtl8187_led *led = &priv->led_tx; 33 struct rtl8187_led *led = &priv->led_tx;
34 34
35 /* Don't change the LED, when the device is down. */ 35 /* Don't change the LED, when the device is down. */
36 if (priv->mode == NL80211_IFTYPE_UNSPECIFIED) 36 if (!priv->vif || priv->vif->type == NL80211_IFTYPE_UNSPECIFIED)
37 return ; 37 return ;
38 38
39 /* Skip if the LED is not registered. */ 39 /* Skip if the LED is not registered. */
@@ -71,7 +71,7 @@ static void led_turn_off(struct work_struct *work)
71 struct rtl8187_led *led = &priv->led_tx; 71 struct rtl8187_led *led = &priv->led_tx;
72 72
73 /* Don't change the LED, when the device is down. */ 73 /* Don't change the LED, when the device is down. */
74 if (priv->mode == NL80211_IFTYPE_UNSPECIFIED) 74 if (!priv->vif || priv->vif->type == NL80211_IFTYPE_UNSPECIFIED)
75 return ; 75 return ;
76 76
77 /* Skip if the LED is not registered. */ 77 /* Skip if the LED is not registered. */
diff --git a/drivers/net/wireless/wl12xx/wl1251.h b/drivers/net/wireless/wl12xx/wl1251.h
index 054533f7a12..6301578d156 100644
--- a/drivers/net/wireless/wl12xx/wl1251.h
+++ b/drivers/net/wireless/wl12xx/wl1251.h
@@ -247,6 +247,7 @@ struct wl1251_debugfs {
247 struct dentry *rxpipe_tx_xfr_host_int_trig_rx_data; 247 struct dentry *rxpipe_tx_xfr_host_int_trig_rx_data;
248 248
249 struct dentry *tx_queue_len; 249 struct dentry *tx_queue_len;
250 struct dentry *tx_queue_status;
250 251
251 struct dentry *retry_count; 252 struct dentry *retry_count;
252 struct dentry *excessive_retries; 253 struct dentry *excessive_retries;
diff --git a/drivers/net/wireless/wl12xx/wl1251_acx.c b/drivers/net/wireless/wl12xx/wl1251_acx.c
index acfa086dbfc..beff084040b 100644
--- a/drivers/net/wireless/wl12xx/wl1251_acx.c
+++ b/drivers/net/wireless/wl12xx/wl1251_acx.c
@@ -976,3 +976,72 @@ out:
976 kfree(acx); 976 kfree(acx);
977 return ret; 977 return ret;
978} 978}
979
980int wl1251_acx_ac_cfg(struct wl1251 *wl, u8 ac, u8 cw_min, u16 cw_max,
981 u8 aifs, u16 txop)
982{
983 struct wl1251_acx_ac_cfg *acx;
984 int ret = 0;
985
986 wl1251_debug(DEBUG_ACX, "acx ac cfg %d cw_min %d cw_max %d "
987 "aifs %d txop %d", ac, cw_min, cw_max, aifs, txop);
988
989 acx = kzalloc(sizeof(*acx), GFP_KERNEL);
990
991 if (!acx) {
992 ret = -ENOMEM;
993 goto out;
994 }
995
996 acx->ac = ac;
997 acx->cw_min = cw_min;
998 acx->cw_max = cw_max;
999 acx->aifsn = aifs;
1000 acx->txop_limit = txop;
1001
1002 ret = wl1251_cmd_configure(wl, ACX_AC_CFG, acx, sizeof(*acx));
1003 if (ret < 0) {
1004 wl1251_warning("acx ac cfg failed: %d", ret);
1005 goto out;
1006 }
1007
1008out:
1009 kfree(acx);
1010 return ret;
1011}
1012
1013int wl1251_acx_tid_cfg(struct wl1251 *wl, u8 queue,
1014 enum wl1251_acx_channel_type type,
1015 u8 tsid, enum wl1251_acx_ps_scheme ps_scheme,
1016 enum wl1251_acx_ack_policy ack_policy)
1017{
1018 struct wl1251_acx_tid_cfg *acx;
1019 int ret = 0;
1020
1021 wl1251_debug(DEBUG_ACX, "acx tid cfg %d type %d tsid %d "
1022 "ps_scheme %d ack_policy %d", queue, type, tsid,
1023 ps_scheme, ack_policy);
1024
1025 acx = kzalloc(sizeof(*acx), GFP_KERNEL);
1026
1027 if (!acx) {
1028 ret = -ENOMEM;
1029 goto out;
1030 }
1031
1032 acx->queue = queue;
1033 acx->type = type;
1034 acx->tsid = tsid;
1035 acx->ps_scheme = ps_scheme;
1036 acx->ack_policy = ack_policy;
1037
1038 ret = wl1251_cmd_configure(wl, ACX_TID_CFG, acx, sizeof(*acx));
1039 if (ret < 0) {
1040 wl1251_warning("acx tid cfg failed: %d", ret);
1041 goto out;
1042 }
1043
1044out:
1045 kfree(acx);
1046 return ret;
1047}
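
Note: the two helpers added above program one access category's EDCA parameters and the matching per-TID queue behaviour. A minimal usage sketch, mirroring how the init and conf_tx paths later in this patch drive them (AC_BE and the CWMIN_BE/CWMAX_BE/AIFS_DIFS/TXOP_BE defaults come from wl1251_init.h in this same series; the wrapper function itself is illustrative):

#include "wl1251_acx.h"
#include "wl1251_init.h"

/* Sketch: best-effort EDCA defaults plus a matching legacy TID configuration. */
static int wl1251_example_qos_setup(struct wl1251 *wl)
{
        int ret;

        ret = wl1251_acx_ac_cfg(wl, AC_BE, CWMIN_BE, CWMAX_BE,
                                AIFS_DIFS, TXOP_BE);
        if (ret < 0)
                return ret;

        return wl1251_acx_tid_cfg(wl, AC_BE, CHANNEL_TYPE_EDCF, AC_BE,
                                  WL1251_ACX_PS_SCHEME_LEGACY,
                                  WL1251_ACX_ACK_POLICY_LEGACY);
}
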
diff --git a/drivers/net/wireless/wl12xx/wl1251_acx.h b/drivers/net/wireless/wl12xx/wl1251_acx.h
index 652371432cd..26160c45784 100644
--- a/drivers/net/wireless/wl12xx/wl1251_acx.h
+++ b/drivers/net/wireless/wl12xx/wl1251_acx.h
@@ -1166,6 +1166,87 @@ struct wl1251_acx_wr_tbtt_and_dtim {
1166 u8 padding; 1166 u8 padding;
1167} __attribute__ ((packed)); 1167} __attribute__ ((packed));
1168 1168
1169struct wl1251_acx_ac_cfg {
1170 struct acx_header header;
1171
1172 /*
1173 * Access Category - The TX queue's access category
1174 * (refer to AccessCategory_enum)
1175 */
1176 u8 ac;
1177
1178 /*
1179 * The contention window minimum size (in slots) for
1180 * the access class.
1181 */
1182 u8 cw_min;
1183
1184 /*
1185 * The contention window maximum size (in slots) for
1186 * the access class.
1187 */
1188 u16 cw_max;
1189
1190 /* The AIF value (in slots) for the access class. */
1191 u8 aifsn;
1192
1193 u8 reserved;
1194
1195 /* The TX Op Limit (in microseconds) for the access class. */
1196 u16 txop_limit;
1197} __attribute__ ((packed));
1198
1199
1200enum wl1251_acx_channel_type {
1201 CHANNEL_TYPE_DCF = 0,
1202 CHANNEL_TYPE_EDCF = 1,
1203 CHANNEL_TYPE_HCCA = 2,
1204};
1205
1206enum wl1251_acx_ps_scheme {
1207 /* regular ps: simple sending of packets */
1208 WL1251_ACX_PS_SCHEME_LEGACY = 0,
1209
1210 /* sending a packet triggers an unscheduled apsd downstream */
1211 WL1251_ACX_PS_SCHEME_UPSD_TRIGGER = 1,
1212
1213 /* a pspoll packet will be sent before every data packet */
1214 WL1251_ACX_PS_SCHEME_LEGACY_PSPOLL = 2,
1215
1216 /* scheduled apsd mode */
1217 WL1251_ACX_PS_SCHEME_SAPSD = 3,
1218};
1219
1220enum wl1251_acx_ack_policy {
1221 WL1251_ACX_ACK_POLICY_LEGACY = 0,
1222 WL1251_ACX_ACK_POLICY_NO_ACK = 1,
1223 WL1251_ACX_ACK_POLICY_BLOCK = 2,
1224};
1225
1226struct wl1251_acx_tid_cfg {
1227 struct acx_header header;
1228
1229 /* tx queue id number (0-7) */
1230 u8 queue;
1231
1232 /* channel access type for the queue, enum wl1251_acx_channel_type */
1233 u8 type;
1234
1235 /* EDCA: ac index (0-3), HCCA: traffic stream id (8-15) */
1236 u8 tsid;
1237
1238 /* ps scheme of the specified queue, enum wl1251_acx_ps_scheme */
1239 u8 ps_scheme;
1240
1241 /* the tx queue ack policy, enum wl1251_acx_ack_policy */
1242 u8 ack_policy;
1243
1244 u8 padding[3];
1245
1246 /* not supported */
1247 u32 apsdconf[2];
1248} __attribute__ ((packed));
1249
1169/************************************************************************* 1250/*************************************************************************
1170 1251
1171 Host Interrupt Register (WiLink -> Host) 1252 Host Interrupt Register (WiLink -> Host)
@@ -1322,5 +1403,11 @@ int wl1251_acx_tsf_info(struct wl1251 *wl, u64 *mactime);
1322int wl1251_acx_rate_policies(struct wl1251 *wl); 1403int wl1251_acx_rate_policies(struct wl1251 *wl);
1323int wl1251_acx_mem_cfg(struct wl1251 *wl); 1404int wl1251_acx_mem_cfg(struct wl1251 *wl);
1324int wl1251_acx_wr_tbtt_and_dtim(struct wl1251 *wl, u16 tbtt, u8 dtim); 1405int wl1251_acx_wr_tbtt_and_dtim(struct wl1251 *wl, u16 tbtt, u8 dtim);
1406int wl1251_acx_ac_cfg(struct wl1251 *wl, u8 ac, u8 cw_min, u16 cw_max,
1407 u8 aifs, u16 txop);
1408int wl1251_acx_tid_cfg(struct wl1251 *wl, u8 queue,
1409 enum wl1251_acx_channel_type type,
1410 u8 tsid, enum wl1251_acx_ps_scheme ps_scheme,
1411 enum wl1251_acx_ack_policy ack_policy);
1325 1412
1326#endif /* __WL1251_ACX_H__ */ 1413#endif /* __WL1251_ACX_H__ */
diff --git a/drivers/net/wireless/wl12xx/wl1251_cmd.c b/drivers/net/wireless/wl12xx/wl1251_cmd.c
index 770f260726b..0320b478bb3 100644
--- a/drivers/net/wireless/wl12xx/wl1251_cmd.c
+++ b/drivers/net/wireless/wl12xx/wl1251_cmd.c
@@ -410,3 +410,86 @@ out:
410 kfree(cmd); 410 kfree(cmd);
411 return ret; 411 return ret;
412} 412}
413
414int wl1251_cmd_scan(struct wl1251 *wl, u8 *ssid, size_t ssid_len,
415 struct ieee80211_channel *channels[],
416 unsigned int n_channels, unsigned int n_probes)
417{
418 struct wl1251_cmd_scan *cmd;
419 int i, ret = 0;
420
421 wl1251_debug(DEBUG_CMD, "cmd scan");
422
423 cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
424 if (!cmd)
425 return -ENOMEM;
426
427 cmd->params.rx_config_options = cpu_to_le32(CFG_RX_ALL_GOOD);
428 cmd->params.rx_filter_options = cpu_to_le32(CFG_RX_PRSP_EN |
429 CFG_RX_MGMT_EN |
430 CFG_RX_BCN_EN);
431 cmd->params.scan_options = 0;
432 cmd->params.num_channels = n_channels;
433 cmd->params.num_probe_requests = n_probes;
434 cmd->params.tx_rate = cpu_to_le16(1 << 1); /* 2 Mbps */
435 cmd->params.tid_trigger = 0;
436
437 for (i = 0; i < n_channels; i++) {
438 cmd->channels[i].min_duration =
439 cpu_to_le32(WL1251_SCAN_MIN_DURATION);
440 cmd->channels[i].max_duration =
441 cpu_to_le32(WL1251_SCAN_MAX_DURATION);
442 memset(&cmd->channels[i].bssid_lsb, 0xff, 4);
443 memset(&cmd->channels[i].bssid_msb, 0xff, 2);
444 cmd->channels[i].early_termination = 0;
445 cmd->channels[i].tx_power_att = 0;
446 cmd->channels[i].channel = channels[i]->hw_value;
447 }
448
449 cmd->params.ssid_len = ssid_len;
450 if (ssid)
451 memcpy(cmd->params.ssid, ssid, ssid_len);
452
453 ret = wl1251_cmd_send(wl, CMD_SCAN, cmd, sizeof(*cmd));
454 if (ret < 0) {
455 wl1251_error("cmd scan failed: %d", ret);
456 goto out;
457 }
458
459 wl1251_mem_read(wl, wl->cmd_box_addr, cmd, sizeof(*cmd));
460
461 if (cmd->header.status != CMD_STATUS_SUCCESS) {
462 wl1251_error("cmd scan status wasn't success: %d",
463 cmd->header.status);
464 ret = -EIO;
465 goto out;
466 }
467
468out:
469 kfree(cmd);
470 return ret;
471}
472
473int wl1251_cmd_trigger_scan_to(struct wl1251 *wl, u32 timeout)
474{
475 struct wl1251_cmd_trigger_scan_to *cmd;
476 int ret;
477
478 wl1251_debug(DEBUG_CMD, "cmd trigger scan to");
479
480 cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
481 if (!cmd)
482 return -ENOMEM;
483
484 cmd->timeout = timeout;
485
486 ret = wl1251_cmd_send(wl, CMD_TRIGGER_SCAN_TO, cmd, sizeof(*cmd));
487 if (ret < 0) {
488 wl1251_error("cmd trigger scan to failed: %d", ret);
489 goto out;
490 }
491
492out:
493 kfree(cmd);
494 return ret;
495}
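
Note: these two scan helpers are intended to be used as a pair, exactly as the reworked hw_scan handler later in this patch does: set the trigger-scan timeout first, then issue the scan itself. A minimal sketch (the wrapper and its timeout of 0 mirror that handler; everything else is a placeholder):

#include "wl1251_cmd.h"

/* Sketch: timeout first, then an active scan over the given channel list. */
static int wl1251_example_scan(struct wl1251 *wl, u8 *ssid, size_t ssid_len,
                               struct ieee80211_channel *channels[],
                               unsigned int n_channels)
{
        int ret;

        ret = wl1251_cmd_trigger_scan_to(wl, 0);
        if (ret < 0)
                return ret;

        return wl1251_cmd_scan(wl, ssid, ssid_len, channels,
                               n_channels, WL1251_SCAN_NUM_PROBES);
}
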
diff --git a/drivers/net/wireless/wl12xx/wl1251_cmd.h b/drivers/net/wireless/wl12xx/wl1251_cmd.h
index dff798ad0ef..4ad67cae94d 100644
--- a/drivers/net/wireless/wl12xx/wl1251_cmd.h
+++ b/drivers/net/wireless/wl12xx/wl1251_cmd.h
@@ -27,6 +27,8 @@
27 27
28#include "wl1251.h" 28#include "wl1251.h"
29 29
30#include <net/cfg80211.h>
31
30struct acx_header; 32struct acx_header;
31 33
32int wl1251_cmd_send(struct wl1251 *wl, u16 type, void *buf, size_t buf_len); 34int wl1251_cmd_send(struct wl1251 *wl, u16 type, void *buf, size_t buf_len);
@@ -43,6 +45,10 @@ int wl1251_cmd_read_memory(struct wl1251 *wl, u32 addr, void *answer,
43 size_t len); 45 size_t len);
44int wl1251_cmd_template_set(struct wl1251 *wl, u16 cmd_id, 46int wl1251_cmd_template_set(struct wl1251 *wl, u16 cmd_id,
45 void *buf, size_t buf_len); 47 void *buf, size_t buf_len);
48int wl1251_cmd_scan(struct wl1251 *wl, u8 *ssid, size_t ssid_len,
49 struct ieee80211_channel *channels[],
50 unsigned int n_channels, unsigned int n_probes);
51int wl1251_cmd_trigger_scan_to(struct wl1251 *wl, u32 timeout);
46 52
47/* unit ms */ 53/* unit ms */
48#define WL1251_COMMAND_TIMEOUT 2000 54#define WL1251_COMMAND_TIMEOUT 2000
@@ -163,8 +169,12 @@ struct cmd_read_write_memory {
163#define CMDMBOX_HEADER_LEN 4 169#define CMDMBOX_HEADER_LEN 4
164#define CMDMBOX_INFO_ELEM_HEADER_LEN 4 170#define CMDMBOX_INFO_ELEM_HEADER_LEN 4
165 171
172#define WL1251_SCAN_MIN_DURATION 30000
173#define WL1251_SCAN_MAX_DURATION 60000
174
175#define WL1251_SCAN_NUM_PROBES 3
166 176
167struct basic_scan_parameters { 177struct wl1251_scan_parameters {
168 u32 rx_config_options; 178 u32 rx_config_options;
169 u32 rx_filter_options; 179 u32 rx_filter_options;
170 180
@@ -189,11 +199,11 @@ struct basic_scan_parameters {
189 199
190 u8 tid_trigger; 200 u8 tid_trigger;
191 u8 ssid_len; 201 u8 ssid_len;
192 u32 ssid[8]; 202 u8 ssid[32];
193 203
194} __attribute__ ((packed)); 204} __attribute__ ((packed));
195 205
196struct basic_scan_channel_parameters { 206struct wl1251_scan_ch_parameters {
197 u32 min_duration; /* in TU */ 207 u32 min_duration; /* in TU */
198 u32 max_duration; /* in TU */ 208 u32 max_duration; /* in TU */
199 u32 bssid_lsb; 209 u32 bssid_lsb;
@@ -213,11 +223,11 @@ struct basic_scan_channel_parameters {
213/* SCAN parameters */ 223/* SCAN parameters */
214#define SCAN_MAX_NUM_OF_CHANNELS 16 224#define SCAN_MAX_NUM_OF_CHANNELS 16
215 225
216struct cmd_scan { 226struct wl1251_cmd_scan {
217 struct wl1251_cmd_header header; 227 struct wl1251_cmd_header header;
218 228
219 struct basic_scan_parameters params; 229 struct wl1251_scan_parameters params;
220 struct basic_scan_channel_parameters channels[SCAN_MAX_NUM_OF_CHANNELS]; 230 struct wl1251_scan_ch_parameters channels[SCAN_MAX_NUM_OF_CHANNELS];
221} __attribute__ ((packed)); 231} __attribute__ ((packed));
222 232
223enum { 233enum {
diff --git a/drivers/net/wireless/wl12xx/wl1251_debugfs.c b/drivers/net/wireless/wl12xx/wl1251_debugfs.c
index a00723059f8..0ccba57fb9f 100644
--- a/drivers/net/wireless/wl12xx/wl1251_debugfs.c
+++ b/drivers/net/wireless/wl12xx/wl1251_debugfs.c
@@ -237,6 +237,27 @@ static const struct file_operations tx_queue_len_ops = {
237 .open = wl1251_open_file_generic, 237 .open = wl1251_open_file_generic,
238}; 238};
239 239
240static ssize_t tx_queue_status_read(struct file *file, char __user *userbuf,
241 size_t count, loff_t *ppos)
242{
243 struct wl1251 *wl = file->private_data;
244 char buf[3], status;
245 int len;
246
247 if (wl->tx_queue_stopped)
248 status = 's';
249 else
250 status = 'r';
251
252 len = scnprintf(buf, sizeof(buf), "%c\n", status);
253 return simple_read_from_buffer(userbuf, count, ppos, buf, len);
254}
255
256static const struct file_operations tx_queue_status_ops = {
257 .read = tx_queue_status_read,
258 .open = wl1251_open_file_generic,
259};
260
240static void wl1251_debugfs_delete_files(struct wl1251 *wl) 261static void wl1251_debugfs_delete_files(struct wl1251 *wl)
241{ 262{
242 DEBUGFS_FWSTATS_DEL(tx, internal_desc_overflow); 263 DEBUGFS_FWSTATS_DEL(tx, internal_desc_overflow);
@@ -331,6 +352,7 @@ static void wl1251_debugfs_delete_files(struct wl1251 *wl)
331 DEBUGFS_FWSTATS_DEL(rxpipe, tx_xfr_host_int_trig_rx_data); 352 DEBUGFS_FWSTATS_DEL(rxpipe, tx_xfr_host_int_trig_rx_data);
332 353
333 DEBUGFS_DEL(tx_queue_len); 354 DEBUGFS_DEL(tx_queue_len);
355 DEBUGFS_DEL(tx_queue_status);
334 DEBUGFS_DEL(retry_count); 356 DEBUGFS_DEL(retry_count);
335 DEBUGFS_DEL(excessive_retries); 357 DEBUGFS_DEL(excessive_retries);
336} 358}
@@ -431,6 +453,7 @@ static int wl1251_debugfs_add_files(struct wl1251 *wl)
431 DEBUGFS_FWSTATS_ADD(rxpipe, tx_xfr_host_int_trig_rx_data); 453 DEBUGFS_FWSTATS_ADD(rxpipe, tx_xfr_host_int_trig_rx_data);
432 454
433 DEBUGFS_ADD(tx_queue_len, wl->debugfs.rootdir); 455 DEBUGFS_ADD(tx_queue_len, wl->debugfs.rootdir);
456 DEBUGFS_ADD(tx_queue_status, wl->debugfs.rootdir);
434 DEBUGFS_ADD(retry_count, wl->debugfs.rootdir); 457 DEBUGFS_ADD(retry_count, wl->debugfs.rootdir);
435 DEBUGFS_ADD(excessive_retries, wl->debugfs.rootdir); 458 DEBUGFS_ADD(excessive_retries, wl->debugfs.rootdir);
436 459
diff --git a/drivers/net/wireless/wl12xx/wl1251_init.c b/drivers/net/wireless/wl12xx/wl1251_init.c
index 5cb573383ee..5aad56ea715 100644
--- a/drivers/net/wireless/wl12xx/wl1251_init.c
+++ b/drivers/net/wireless/wl12xx/wl1251_init.c
@@ -294,6 +294,11 @@ static int wl1251_hw_init_tx_queue_config(struct wl1251 *wl)
294 goto out; 294 goto out;
295 } 295 }
296 296
297 wl1251_acx_ac_cfg(wl, AC_BE, CWMIN_BE, CWMAX_BE, AIFS_DIFS, TXOP_BE);
298 wl1251_acx_ac_cfg(wl, AC_BK, CWMIN_BK, CWMAX_BK, AIFS_DIFS, TXOP_BK);
299 wl1251_acx_ac_cfg(wl, AC_VI, CWMIN_VI, CWMAX_VI, AIFS_DIFS, TXOP_VI);
300 wl1251_acx_ac_cfg(wl, AC_VO, CWMIN_VO, CWMAX_VO, AIFS_DIFS, TXOP_VO);
301
297out: 302out:
298 kfree(config); 303 kfree(config);
299 return ret; 304 return ret;
diff --git a/drivers/net/wireless/wl12xx/wl1251_init.h b/drivers/net/wireless/wl12xx/wl1251_init.h
index b3b25ec885e..269cefb3e7d 100644
--- a/drivers/net/wireless/wl12xx/wl1251_init.h
+++ b/drivers/net/wireless/wl12xx/wl1251_init.h
@@ -26,6 +26,53 @@
26 26
27#include "wl1251.h" 27#include "wl1251.h"
28 28
29enum {
30 /* best effort/legacy */
31 AC_BE = 0,
32
33 /* background */
34 AC_BK = 1,
35
36 /* video */
37 AC_VI = 2,
38
39 /* voice */
40 AC_VO = 3,
41
42 /* broadcast dummy access category */
43 AC_BCAST = 4,
44
45 NUM_ACCESS_CATEGORIES = 4
46};
47
48/* following are default values for the IE fields */
49#define CWMIN_BK 15
50#define CWMIN_BE 15
51#define CWMIN_VI 7
52#define CWMIN_VO 3
53#define CWMAX_BK 1023
54#define CWMAX_BE 63
55#define CWMAX_VI 15
56#define CWMAX_VO 7
57
58/* slot number setting to start transmission at PIFS interval */
59#define AIFS_PIFS 1
60
61/*
62 * slot number setting to start transmission at DIFS interval - normal DCF
63 * access
64 */
65#define AIFS_DIFS 2
66
67#define AIFSN_BK 7
68#define AIFSN_BE 3
69#define AIFSN_VI AIFS_PIFS
70#define AIFSN_VO AIFS_PIFS
71#define TXOP_BK 0
72#define TXOP_BE 0
73#define TXOP_VI 3008
74#define TXOP_VO 1504
75
29int wl1251_hw_init_hwenc_config(struct wl1251 *wl); 76int wl1251_hw_init_hwenc_config(struct wl1251 *wl);
30int wl1251_hw_init_templates_config(struct wl1251 *wl); 77int wl1251_hw_init_templates_config(struct wl1251 *wl);
31int wl1251_hw_init_rx_config(struct wl1251 *wl, u32 config, u32 filter); 78int wl1251_hw_init_rx_config(struct wl1251 *wl, u32 config, u32 filter);
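
Note: the TXOP defaults above are the 802.11 EDCA defaults for an OFDM/ERP PHY, converted from the standard's 32 us units into the microseconds that wl1251_acx_ac_cfg() expects. A small sketch making the conversion explicit (the check function is illustrative, not part of the patch):

#include <linux/kernel.h>
#include "wl1251_init.h"

/* Sketch: 94 and 47 are the EDCA TXOP limits in 32 us units. */
static inline void wl1251_example_check_txop(void)
{
        BUILD_BUG_ON(TXOP_VI != 94 * 32);       /* 3008 us */
        BUILD_BUG_ON(TXOP_VO != 47 * 32);       /* 1504 us */
}
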
diff --git a/drivers/net/wireless/wl12xx/wl1251_main.c b/drivers/net/wireless/wl12xx/wl1251_main.c
index 2f50a256efa..595f0f94d16 100644
--- a/drivers/net/wireless/wl12xx/wl1251_main.c
+++ b/drivers/net/wireless/wl12xx/wl1251_main.c
@@ -395,6 +395,7 @@ static int wl1251_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
395 * the queue here, otherwise the queue will get too long. 395 * the queue here, otherwise the queue will get too long.
396 */ 396 */
397 if (skb_queue_len(&wl->tx_queue) >= WL1251_TX_QUEUE_MAX_LENGTH) { 397 if (skb_queue_len(&wl->tx_queue) >= WL1251_TX_QUEUE_MAX_LENGTH) {
398 wl1251_debug(DEBUG_TX, "op_tx: tx_queue full, stop queues");
398 ieee80211_stop_queues(wl->hw); 399 ieee80211_stop_queues(wl->hw);
399 400
400 /* 401 /*
@@ -510,13 +511,13 @@ static void wl1251_op_stop(struct ieee80211_hw *hw)
510} 511}
511 512
512static int wl1251_op_add_interface(struct ieee80211_hw *hw, 513static int wl1251_op_add_interface(struct ieee80211_hw *hw,
513 struct ieee80211_if_init_conf *conf) 514 struct ieee80211_vif *vif)
514{ 515{
515 struct wl1251 *wl = hw->priv; 516 struct wl1251 *wl = hw->priv;
516 int ret = 0; 517 int ret = 0;
517 518
518 wl1251_debug(DEBUG_MAC80211, "mac80211 add interface type %d mac %pM", 519 wl1251_debug(DEBUG_MAC80211, "mac80211 add interface type %d mac %pM",
519 conf->type, conf->mac_addr); 520 vif->type, vif->addr);
520 521
521 mutex_lock(&wl->mutex); 522 mutex_lock(&wl->mutex);
522 if (wl->vif) { 523 if (wl->vif) {
@@ -524,9 +525,9 @@ static int wl1251_op_add_interface(struct ieee80211_hw *hw,
524 goto out; 525 goto out;
525 } 526 }
526 527
527 wl->vif = conf->vif; 528 wl->vif = vif;
528 529
529 switch (conf->type) { 530 switch (vif->type) {
530 case NL80211_IFTYPE_STATION: 531 case NL80211_IFTYPE_STATION:
531 wl->bss_type = BSS_TYPE_STA_BSS; 532 wl->bss_type = BSS_TYPE_STA_BSS;
532 break; 533 break;
@@ -538,8 +539,8 @@ static int wl1251_op_add_interface(struct ieee80211_hw *hw,
538 goto out; 539 goto out;
539 } 540 }
540 541
541 if (memcmp(wl->mac_addr, conf->mac_addr, ETH_ALEN)) { 542 if (memcmp(wl->mac_addr, vif->addr, ETH_ALEN)) {
542 memcpy(wl->mac_addr, conf->mac_addr, ETH_ALEN); 543 memcpy(wl->mac_addr, vif->addr, ETH_ALEN);
543 SET_IEEE80211_PERM_ADDR(wl->hw, wl->mac_addr); 544 SET_IEEE80211_PERM_ADDR(wl->hw, wl->mac_addr);
544 ret = wl1251_acx_station_id(wl); 545 ret = wl1251_acx_station_id(wl);
545 if (ret < 0) 546 if (ret < 0)
@@ -552,7 +553,7 @@ out:
552} 553}
553 554
554static void wl1251_op_remove_interface(struct ieee80211_hw *hw, 555static void wl1251_op_remove_interface(struct ieee80211_hw *hw,
555 struct ieee80211_if_init_conf *conf) 556 struct ieee80211_vif *vif)
556{ 557{
557 struct wl1251 *wl = hw->priv; 558 struct wl1251 *wl = hw->priv;
558 559
@@ -562,43 +563,25 @@ static void wl1251_op_remove_interface(struct ieee80211_hw *hw,
562 mutex_unlock(&wl->mutex); 563 mutex_unlock(&wl->mutex);
563} 564}
564 565
565static int wl1251_build_null_data(struct wl1251 *wl) 566static int wl1251_build_qos_null_data(struct wl1251 *wl)
566{ 567{
567 struct wl12xx_null_data_template template; 568 struct ieee80211_qos_hdr template;
568 569
569 if (!is_zero_ether_addr(wl->bssid)) { 570 memset(&template, 0, sizeof(template));
570 memcpy(template.header.da, wl->bssid, ETH_ALEN);
571 memcpy(template.header.bssid, wl->bssid, ETH_ALEN);
572 } else {
573 memset(template.header.da, 0xff, ETH_ALEN);
574 memset(template.header.bssid, 0xff, ETH_ALEN);
575 }
576
577 memcpy(template.header.sa, wl->mac_addr, ETH_ALEN);
578 template.header.frame_ctl = cpu_to_le16(IEEE80211_FTYPE_DATA |
579 IEEE80211_STYPE_NULLFUNC |
580 IEEE80211_FCTL_TODS);
581
582 return wl1251_cmd_template_set(wl, CMD_NULL_DATA, &template,
583 sizeof(template));
584
585}
586
587static int wl1251_build_ps_poll(struct wl1251 *wl, u16 aid)
588{
589 struct wl12xx_ps_poll_template template;
590 571
591 memcpy(template.bssid, wl->bssid, ETH_ALEN); 572 memcpy(template.addr1, wl->bssid, ETH_ALEN);
592 memcpy(template.ta, wl->mac_addr, ETH_ALEN); 573 memcpy(template.addr2, wl->mac_addr, ETH_ALEN);
574 memcpy(template.addr3, wl->bssid, ETH_ALEN);
593 575
594 /* aid in PS-Poll has its two MSBs each set to 1 */ 576 template.frame_control = cpu_to_le16(IEEE80211_FTYPE_DATA |
595 template.aid = cpu_to_le16(1 << 15 | 1 << 14 | aid); 577 IEEE80211_STYPE_QOS_NULLFUNC |
578 IEEE80211_FCTL_TODS);
596 579
597 template.fc = cpu_to_le16(IEEE80211_FTYPE_CTL | IEEE80211_STYPE_PSPOLL); 580 /* FIXME: not sure what priority to use here */
581 template.qos_ctrl = cpu_to_le16(0);
598 582
599 return wl1251_cmd_template_set(wl, CMD_PS_POLL, &template, 583 return wl1251_cmd_template_set(wl, CMD_QOS_NULL_DATA, &template,
600 sizeof(template)); 584 sizeof(template));
601
602} 585}
603 586
604static int wl1251_op_config(struct ieee80211_hw *hw, u32 changed) 587static int wl1251_op_config(struct ieee80211_hw *hw, u32 changed)
@@ -640,20 +623,25 @@ static int wl1251_op_config(struct ieee80211_hw *hw, u32 changed)
640 * through the bss_info_changed() hook. 623 * through the bss_info_changed() hook.
641 */ 624 */
642 ret = wl1251_ps_set_mode(wl, STATION_POWER_SAVE_MODE); 625 ret = wl1251_ps_set_mode(wl, STATION_POWER_SAVE_MODE);
626 if (ret < 0)
627 goto out_sleep;
643 } else if (!(conf->flags & IEEE80211_CONF_PS) && 628 } else if (!(conf->flags & IEEE80211_CONF_PS) &&
644 wl->psm_requested) { 629 wl->psm_requested) {
645 wl1251_debug(DEBUG_PSM, "psm disabled"); 630 wl1251_debug(DEBUG_PSM, "psm disabled");
646 631
647 wl->psm_requested = false; 632 wl->psm_requested = false;
648 633
649 if (wl->psm) 634 if (wl->psm) {
650 ret = wl1251_ps_set_mode(wl, STATION_ACTIVE_MODE); 635 ret = wl1251_ps_set_mode(wl, STATION_ACTIVE_MODE);
636 if (ret < 0)
637 goto out_sleep;
638 }
651 } 639 }
652 640
653 if (conf->power_level != wl->power_level) { 641 if (conf->power_level != wl->power_level) {
654 ret = wl1251_acx_tx_power(wl, conf->power_level); 642 ret = wl1251_acx_tx_power(wl, conf->power_level);
655 if (ret < 0) 643 if (ret < 0)
656 goto out; 644 goto out_sleep;
657 645
658 wl->power_level = conf->power_level; 646 wl->power_level = conf->power_level;
659 } 647 }
@@ -864,199 +852,61 @@ out:
864 return ret; 852 return ret;
865} 853}
866 854
867static int wl1251_build_basic_rates(char *rates) 855static int wl1251_op_hw_scan(struct ieee80211_hw *hw,
868{ 856 struct cfg80211_scan_request *req)
869 u8 index = 0;
870
871 rates[index++] = IEEE80211_BASIC_RATE_MASK | IEEE80211_CCK_RATE_1MB;
872 rates[index++] = IEEE80211_BASIC_RATE_MASK | IEEE80211_CCK_RATE_2MB;
873 rates[index++] = IEEE80211_BASIC_RATE_MASK | IEEE80211_CCK_RATE_5MB;
874 rates[index++] = IEEE80211_BASIC_RATE_MASK | IEEE80211_CCK_RATE_11MB;
875
876 return index;
877}
878
879static int wl1251_build_extended_rates(char *rates)
880{ 857{
881 u8 index = 0; 858 struct wl1251 *wl = hw->priv;
882 859 struct sk_buff *skb;
883 rates[index++] = IEEE80211_OFDM_RATE_6MB; 860 size_t ssid_len = 0;
884 rates[index++] = IEEE80211_OFDM_RATE_9MB; 861 u8 *ssid = NULL;
885 rates[index++] = IEEE80211_OFDM_RATE_12MB; 862 int ret;
886 rates[index++] = IEEE80211_OFDM_RATE_18MB;
887 rates[index++] = IEEE80211_OFDM_RATE_24MB;
888 rates[index++] = IEEE80211_OFDM_RATE_36MB;
889 rates[index++] = IEEE80211_OFDM_RATE_48MB;
890 rates[index++] = IEEE80211_OFDM_RATE_54MB;
891
892 return index;
893}
894
895 863
896static int wl1251_build_probe_req(struct wl1251 *wl, u8 *ssid, size_t ssid_len) 864 wl1251_debug(DEBUG_MAC80211, "mac80211 hw scan");
897{
898 struct wl12xx_probe_req_template template;
899 struct wl12xx_ie_rates *rates;
900 char *ptr;
901 u16 size;
902
903 ptr = (char *)&template;
904 size = sizeof(struct ieee80211_header);
905
906 memset(template.header.da, 0xff, ETH_ALEN);
907 memset(template.header.bssid, 0xff, ETH_ALEN);
908 memcpy(template.header.sa, wl->mac_addr, ETH_ALEN);
909 template.header.frame_ctl = cpu_to_le16(IEEE80211_STYPE_PROBE_REQ);
910
911 /* IEs */
912 /* SSID */
913 template.ssid.header.id = WLAN_EID_SSID;
914 template.ssid.header.len = ssid_len;
915 if (ssid_len && ssid)
916 memcpy(template.ssid.ssid, ssid, ssid_len);
917 size += sizeof(struct wl12xx_ie_header) + ssid_len;
918 ptr += size;
919
920 /* Basic Rates */
921 rates = (struct wl12xx_ie_rates *)ptr;
922 rates->header.id = WLAN_EID_SUPP_RATES;
923 rates->header.len = wl1251_build_basic_rates(rates->rates);
924 size += sizeof(struct wl12xx_ie_header) + rates->header.len;
925 ptr += sizeof(struct wl12xx_ie_header) + rates->header.len;
926
927 /* Extended rates */
928 rates = (struct wl12xx_ie_rates *)ptr;
929 rates->header.id = WLAN_EID_EXT_SUPP_RATES;
930 rates->header.len = wl1251_build_extended_rates(rates->rates);
931 size += sizeof(struct wl12xx_ie_header) + rates->header.len;
932
933 wl1251_dump(DEBUG_SCAN, "PROBE REQ: ", &template, size);
934
935 return wl1251_cmd_template_set(wl, CMD_PROBE_REQ, &template,
936 size);
937}
938 865
939static int wl1251_hw_scan(struct wl1251 *wl, u8 *ssid, size_t len, 866 if (req->n_ssids) {
940 u8 active_scan, u8 high_prio, u8 num_channels, 867 ssid = req->ssids[0].ssid;
941 u8 probe_requests) 868 ssid_len = req->ssids[0].ssid_len;
942{
943 struct wl1251_cmd_trigger_scan_to *trigger = NULL;
944 struct cmd_scan *params = NULL;
945 int i, ret;
946 u16 scan_options = 0;
947
948 if (wl->scanning)
949 return -EINVAL;
950
951 params = kzalloc(sizeof(*params), GFP_KERNEL);
952 if (!params)
953 return -ENOMEM;
954
955 params->params.rx_config_options = cpu_to_le32(CFG_RX_ALL_GOOD);
956 params->params.rx_filter_options =
957 cpu_to_le32(CFG_RX_PRSP_EN | CFG_RX_MGMT_EN | CFG_RX_BCN_EN);
958
959 /* High priority scan */
960 if (!active_scan)
961 scan_options |= SCAN_PASSIVE;
962 if (high_prio)
963 scan_options |= SCAN_PRIORITY_HIGH;
964 params->params.scan_options = scan_options;
965
966 params->params.num_channels = num_channels;
967 params->params.num_probe_requests = probe_requests;
968 params->params.tx_rate = cpu_to_le16(1 << 1); /* 2 Mbps */
969 params->params.tid_trigger = 0;
970
971 for (i = 0; i < num_channels; i++) {
972 params->channels[i].min_duration = cpu_to_le32(30000);
973 params->channels[i].max_duration = cpu_to_le32(60000);
974 memset(&params->channels[i].bssid_lsb, 0xff, 4);
975 memset(&params->channels[i].bssid_msb, 0xff, 2);
976 params->channels[i].early_termination = 0;
977 params->channels[i].tx_power_att = 0;
978 params->channels[i].channel = i + 1;
979 memset(params->channels[i].pad, 0, 3);
980 } 869 }
981 870
982 for (i = num_channels; i < SCAN_MAX_NUM_OF_CHANNELS; i++) 871 mutex_lock(&wl->mutex);
983 memset(&params->channels[i], 0,
984 sizeof(struct basic_scan_channel_parameters));
985
986 if (len && ssid) {
987 params->params.ssid_len = len;
988 memcpy(params->params.ssid, ssid, len);
989 } else {
990 params->params.ssid_len = 0;
991 memset(params->params.ssid, 0, 32);
992 }
993 872
994 ret = wl1251_build_probe_req(wl, ssid, len); 873 if (wl->scanning) {
995 if (ret < 0) { 874 wl1251_debug(DEBUG_SCAN, "scan already in progress");
996 wl1251_error("PROBE request template failed"); 875 ret = -EINVAL;
997 goto out; 876 goto out;
998 } 877 }
999 878
1000 trigger = kzalloc(sizeof(*trigger), GFP_KERNEL); 879 ret = wl1251_ps_elp_wakeup(wl);
1001 if (!trigger) 880 if (ret < 0)
1002 goto out; 881 goto out;
1003 882
1004 trigger->timeout = 0; 883 skb = ieee80211_probereq_get(wl->hw, wl->vif, ssid, ssid_len,
1005 884 req->ie, req->ie_len);
1006 ret = wl1251_cmd_send(wl, CMD_TRIGGER_SCAN_TO, trigger, 885 if (!skb) {
1007 sizeof(*trigger)); 886 ret = -ENOMEM;
1008 if (ret < 0) {
1009 wl1251_error("trigger scan to failed for hw scan");
1010 goto out; 887 goto out;
1011 } 888 }
1012 889
1013 wl1251_dump(DEBUG_SCAN, "SCAN: ", params, sizeof(*params)); 890 ret = wl1251_cmd_template_set(wl, CMD_PROBE_REQ, skb->data,
1014 891 skb->len);
1015 wl->scanning = true; 892 dev_kfree_skb(skb);
893 if (ret < 0)
894 goto out_sleep;
1016 895
1017 ret = wl1251_cmd_send(wl, CMD_SCAN, params, sizeof(*params)); 896 ret = wl1251_cmd_trigger_scan_to(wl, 0);
1018 if (ret < 0) 897 if (ret < 0)
1019 wl1251_error("SCAN failed"); 898 goto out_sleep;
1020 899
1021 wl1251_mem_read(wl, wl->cmd_box_addr, params, sizeof(*params)); 900 wl->scanning = true;
1022 901
1023 if (params->header.status != CMD_STATUS_SUCCESS) { 902 ret = wl1251_cmd_scan(wl, ssid, ssid_len, req->channels,
1024 wl1251_error("TEST command answer error: %d", 903 req->n_channels, WL1251_SCAN_NUM_PROBES);
1025 params->header.status); 904 if (ret < 0) {
1026 wl->scanning = false; 905 wl->scanning = false;
1027 ret = -EIO; 906 goto out_sleep;
1028 goto out;
1029 }
1030
1031out:
1032 kfree(params);
1033 return ret;
1034
1035}
1036
1037static int wl1251_op_hw_scan(struct ieee80211_hw *hw,
1038 struct cfg80211_scan_request *req)
1039{
1040 struct wl1251 *wl = hw->priv;
1041 int ret;
1042 u8 *ssid = NULL;
1043 size_t ssid_len = 0;
1044
1045 wl1251_debug(DEBUG_MAC80211, "mac80211 hw scan");
1046
1047 if (req->n_ssids) {
1048 ssid = req->ssids[0].ssid;
1049 ssid_len = req->ssids[0].ssid_len;
1050 } 907 }
1051 908
1052 mutex_lock(&wl->mutex); 909out_sleep:
1053
1054 ret = wl1251_ps_elp_wakeup(wl);
1055 if (ret < 0)
1056 goto out;
1057
1058 ret = wl1251_hw_scan(hw->priv, ssid, ssid_len, 1, 0, 13, 3);
1059
1060 wl1251_ps_elp_sleep(wl); 910 wl1251_ps_elp_sleep(wl);
1061 911
1062out: 912out:
@@ -1095,7 +945,7 @@ static void wl1251_op_bss_info_changed(struct ieee80211_hw *hw,
1095{ 945{
1096 enum wl1251_cmd_ps_mode mode; 946 enum wl1251_cmd_ps_mode mode;
1097 struct wl1251 *wl = hw->priv; 947 struct wl1251 *wl = hw->priv;
1098 struct sk_buff *beacon; 948 struct sk_buff *beacon, *skb;
1099 int ret; 949 int ret;
1100 950
1101 wl1251_debug(DEBUG_MAC80211, "mac80211 bss info changed"); 951 wl1251_debug(DEBUG_MAC80211, "mac80211 bss info changed");
@@ -1109,7 +959,17 @@ static void wl1251_op_bss_info_changed(struct ieee80211_hw *hw,
1109 if (changed & BSS_CHANGED_BSSID) { 959 if (changed & BSS_CHANGED_BSSID) {
1110 memcpy(wl->bssid, bss_conf->bssid, ETH_ALEN); 960 memcpy(wl->bssid, bss_conf->bssid, ETH_ALEN);
1111 961
1112 ret = wl1251_build_null_data(wl); 962 skb = ieee80211_nullfunc_get(wl->hw, wl->vif);
963 if (!skb)
964 goto out_sleep;
965
966 ret = wl1251_cmd_template_set(wl, CMD_NULL_DATA,
967 skb->data, skb->len);
968 dev_kfree_skb(skb);
969 if (ret < 0)
970 goto out_sleep;
971
972 ret = wl1251_build_qos_null_data(wl);
1113 if (ret < 0) 973 if (ret < 0)
1114 goto out; 974 goto out;
1115 975
@@ -1130,7 +990,14 @@ static void wl1251_op_bss_info_changed(struct ieee80211_hw *hw,
1130 wl->dtim_period); 990 wl->dtim_period);
1131 wl->aid = bss_conf->aid; 991 wl->aid = bss_conf->aid;
1132 992
1133 ret = wl1251_build_ps_poll(wl, wl->aid); 993 skb = ieee80211_pspoll_get(wl->hw, wl->vif);
994 if (!skb)
995 goto out_sleep;
996
997 ret = wl1251_cmd_template_set(wl, CMD_PS_POLL,
998 skb->data,
999 skb->len);
1000 dev_kfree_skb(skb);
1134 if (ret < 0) 1001 if (ret < 0)
1135 goto out_sleep; 1002 goto out_sleep;
1136 1003
@@ -1176,7 +1043,7 @@ static void wl1251_op_bss_info_changed(struct ieee80211_hw *hw,
1176 ret = wl1251_acx_cts_protect(wl, CTSPROTECT_DISABLE); 1043 ret = wl1251_acx_cts_protect(wl, CTSPROTECT_DISABLE);
1177 if (ret < 0) { 1044 if (ret < 0) {
1178 wl1251_warning("Set ctsprotect failed %d", ret); 1045 wl1251_warning("Set ctsprotect failed %d", ret);
1179 goto out; 1046 goto out_sleep;
1180 } 1047 }
1181 } 1048 }
1182 1049
@@ -1187,7 +1054,7 @@ static void wl1251_op_bss_info_changed(struct ieee80211_hw *hw,
1187 1054
1188 if (ret < 0) { 1055 if (ret < 0) {
1189 dev_kfree_skb(beacon); 1056 dev_kfree_skb(beacon);
1190 goto out; 1057 goto out_sleep;
1191 } 1058 }
1192 1059
1193 ret = wl1251_cmd_template_set(wl, CMD_PROBE_RESP, beacon->data, 1060 ret = wl1251_cmd_template_set(wl, CMD_PROBE_RESP, beacon->data,
@@ -1196,13 +1063,13 @@ static void wl1251_op_bss_info_changed(struct ieee80211_hw *hw,
1196 dev_kfree_skb(beacon); 1063 dev_kfree_skb(beacon);
1197 1064
1198 if (ret < 0) 1065 if (ret < 0)
1199 goto out; 1066 goto out_sleep;
1200 1067
1201 ret = wl1251_join(wl, wl->bss_type, wl->beacon_int, 1068 ret = wl1251_join(wl, wl->bss_type, wl->beacon_int,
1202 wl->channel, wl->dtim_period); 1069 wl->channel, wl->dtim_period);
1203 1070
1204 if (ret < 0) 1071 if (ret < 0)
1205 goto out; 1072 goto out_sleep;
1206 } 1073 }
1207 1074
1208out_sleep: 1075out_sleep:
@@ -1273,6 +1140,48 @@ static struct ieee80211_channel wl1251_channels[] = {
1273 { .hw_value = 13, .center_freq = 2472}, 1140 { .hw_value = 13, .center_freq = 2472},
1274}; 1141};
1275 1142
1143static int wl1251_op_conf_tx(struct ieee80211_hw *hw, u16 queue,
1144 const struct ieee80211_tx_queue_params *params)
1145{
1146 enum wl1251_acx_ps_scheme ps_scheme;
1147 struct wl1251 *wl = hw->priv;
1148 int ret;
1149
1150 mutex_lock(&wl->mutex);
1151
1152 wl1251_debug(DEBUG_MAC80211, "mac80211 conf tx %d", queue);
1153
1154 ret = wl1251_ps_elp_wakeup(wl);
1155 if (ret < 0)
1156 goto out;
1157
1158 ret = wl1251_acx_ac_cfg(wl, wl1251_tx_get_queue(queue),
1159 params->cw_min, params->cw_max,
1160 params->aifs, params->txop);
1161 if (ret < 0)
1162 goto out_sleep;
1163
1164 if (params->uapsd)
1165 ps_scheme = WL1251_ACX_PS_SCHEME_UPSD_TRIGGER;
1166 else
1167 ps_scheme = WL1251_ACX_PS_SCHEME_LEGACY;
1168
1169 ret = wl1251_acx_tid_cfg(wl, wl1251_tx_get_queue(queue),
1170 CHANNEL_TYPE_EDCF,
1171 wl1251_tx_get_queue(queue), ps_scheme,
1172 WL1251_ACX_ACK_POLICY_LEGACY);
1173 if (ret < 0)
1174 goto out_sleep;
1175
1176out_sleep:
1177 wl1251_ps_elp_sleep(wl);
1178
1179out:
1180 mutex_unlock(&wl->mutex);
1181
1182 return ret;
1183}
1184
1276/* can't be const, mac80211 writes to this */ 1185/* can't be const, mac80211 writes to this */
1277static struct ieee80211_supported_band wl1251_band_2ghz = { 1186static struct ieee80211_supported_band wl1251_band_2ghz = {
1278 .channels = wl1251_channels, 1187 .channels = wl1251_channels,
@@ -1293,6 +1202,7 @@ static const struct ieee80211_ops wl1251_ops = {
1293 .hw_scan = wl1251_op_hw_scan, 1202 .hw_scan = wl1251_op_hw_scan,
1294 .bss_info_changed = wl1251_op_bss_info_changed, 1203 .bss_info_changed = wl1251_op_bss_info_changed,
1295 .set_rts_threshold = wl1251_op_set_rts_threshold, 1204 .set_rts_threshold = wl1251_op_set_rts_threshold,
1205 .conf_tx = wl1251_op_conf_tx,
1296}; 1206};
1297 1207
1298static int wl1251_register_hw(struct wl1251 *wl) 1208static int wl1251_register_hw(struct wl1251 *wl)
@@ -1332,12 +1242,15 @@ int wl1251_init_ieee80211(struct wl1251 *wl)
1332 wl->hw->flags = IEEE80211_HW_SIGNAL_DBM | 1242 wl->hw->flags = IEEE80211_HW_SIGNAL_DBM |
1333 IEEE80211_HW_NOISE_DBM | 1243 IEEE80211_HW_NOISE_DBM |
1334 IEEE80211_HW_SUPPORTS_PS | 1244 IEEE80211_HW_SUPPORTS_PS |
1335 IEEE80211_HW_BEACON_FILTER; 1245 IEEE80211_HW_BEACON_FILTER |
1246 IEEE80211_HW_SUPPORTS_UAPSD;
1336 1247
1337 wl->hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION); 1248 wl->hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION);
1338 wl->hw->wiphy->max_scan_ssids = 1; 1249 wl->hw->wiphy->max_scan_ssids = 1;
1339 wl->hw->wiphy->bands[IEEE80211_BAND_2GHZ] = &wl1251_band_2ghz; 1250 wl->hw->wiphy->bands[IEEE80211_BAND_2GHZ] = &wl1251_band_2ghz;
1340 1251
1252 wl->hw->queues = 4;
1253
1341 ret = wl1251_register_hw(wl); 1254 ret = wl1251_register_hw(wl);
1342 if (ret) 1255 if (ret)
1343 goto out; 1256 goto out;
diff --git a/drivers/net/wireless/wl12xx/wl1251_ps.c b/drivers/net/wireless/wl12xx/wl1251_ps.c
index 9931b197ff7..851dfb65e47 100644
--- a/drivers/net/wireless/wl12xx/wl1251_ps.c
+++ b/drivers/net/wireless/wl12xx/wl1251_ps.c
@@ -26,7 +26,8 @@
26#include "wl1251_cmd.h" 26#include "wl1251_cmd.h"
27#include "wl1251_io.h" 27#include "wl1251_io.h"
28 28
29#define WL1251_WAKEUP_TIMEOUT 2000 29/* in ms */
30#define WL1251_WAKEUP_TIMEOUT 100
30 31
31void wl1251_elp_work(struct work_struct *work) 32void wl1251_elp_work(struct work_struct *work)
32{ 33{
@@ -67,7 +68,7 @@ void wl1251_ps_elp_sleep(struct wl1251 *wl)
67 68
68int wl1251_ps_elp_wakeup(struct wl1251 *wl) 69int wl1251_ps_elp_wakeup(struct wl1251 *wl)
69{ 70{
70 unsigned long timeout; 71 unsigned long timeout, start;
71 u32 elp_reg; 72 u32 elp_reg;
72 73
73 if (!wl->elp) 74 if (!wl->elp)
@@ -75,6 +76,7 @@ int wl1251_ps_elp_wakeup(struct wl1251 *wl)
75 76
76 wl1251_debug(DEBUG_PSM, "waking up chip from elp"); 77 wl1251_debug(DEBUG_PSM, "waking up chip from elp");
77 78
79 start = jiffies;
78 timeout = jiffies + msecs_to_jiffies(WL1251_WAKEUP_TIMEOUT); 80 timeout = jiffies + msecs_to_jiffies(WL1251_WAKEUP_TIMEOUT);
79 81
80 wl1251_write32(wl, HW_ACCESS_ELP_CTRL_REG_ADDR, ELPCTRL_WAKE_UP); 82 wl1251_write32(wl, HW_ACCESS_ELP_CTRL_REG_ADDR, ELPCTRL_WAKE_UP);
@@ -95,8 +97,7 @@ int wl1251_ps_elp_wakeup(struct wl1251 *wl)
95 } 97 }
96 98
97 wl1251_debug(DEBUG_PSM, "wakeup time: %u ms", 99 wl1251_debug(DEBUG_PSM, "wakeup time: %u ms",
98 jiffies_to_msecs(jiffies) - 100 jiffies_to_msecs(jiffies - start));
99 (jiffies_to_msecs(timeout) - WL1251_WAKEUP_TIMEOUT));
100 101
101 wl->elp = false; 102 wl->elp = false;
102 103
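
Note: the wakeup message above used to derive the elapsed time from the timeout value, which is only correct when the whole timeout is consumed; recording the start jiffies and printing jiffies_to_msecs(jiffies - start) reports the actual wait. A minimal illustration of the pattern (the function is hypothetical):

#include <linux/jiffies.h>
#include <linux/kernel.h>

/* Sketch: report how long a poll loop actually took, in milliseconds. */
static void example_report_elapsed(unsigned long start)
{
        pr_debug("wakeup time: %u ms\n", jiffies_to_msecs(jiffies - start));
}
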
diff --git a/drivers/net/wireless/wl12xx/wl1251_rx.c b/drivers/net/wireless/wl12xx/wl1251_rx.c
index f84cc89cbff..b56732226cc 100644
--- a/drivers/net/wireless/wl12xx/wl1251_rx.c
+++ b/drivers/net/wireless/wl12xx/wl1251_rx.c
@@ -126,7 +126,7 @@ static void wl1251_rx_body(struct wl1251 *wl,
126 if (wl->rx_current_buffer) 126 if (wl->rx_current_buffer)
127 rx_packet_ring_addr += wl->data_path->rx_packet_ring_chunk_size; 127 rx_packet_ring_addr += wl->data_path->rx_packet_ring_chunk_size;
128 128
129 skb = dev_alloc_skb(length); 129 skb = __dev_alloc_skb(length, GFP_KERNEL);
130 if (!skb) { 130 if (!skb) {
131 wl1251_error("Couldn't allocate RX frame"); 131 wl1251_error("Couldn't allocate RX frame");
132 return; 132 return;
diff --git a/drivers/net/wireless/wl12xx/wl1251_tx.c b/drivers/net/wireless/wl12xx/wl1251_tx.c
index f8597061584..c8223185efd 100644
--- a/drivers/net/wireless/wl12xx/wl1251_tx.c
+++ b/drivers/net/wireless/wl12xx/wl1251_tx.c
@@ -167,8 +167,7 @@ static int wl1251_tx_fill_hdr(struct wl1251 *wl, struct sk_buff *skb,
167 tx_hdr->expiry_time = cpu_to_le32(1 << 16); 167 tx_hdr->expiry_time = cpu_to_le32(1 << 16);
168 tx_hdr->id = id; 168 tx_hdr->id = id;
169 169
170 /* FIXME: how to get the correct queue id? */ 170 tx_hdr->xmit_queue = wl1251_tx_get_queue(skb_get_queue_mapping(skb));
171 tx_hdr->xmit_queue = 0;
172 171
173 wl1251_tx_control(tx_hdr, control, fc); 172 wl1251_tx_control(tx_hdr, control, fc);
174 wl1251_tx_frag_block_num(tx_hdr); 173 wl1251_tx_frag_block_num(tx_hdr);
@@ -220,6 +219,7 @@ static int wl1251_tx_send_packet(struct wl1251 *wl, struct sk_buff *skb,
220 /* align the buffer on a 4-byte boundary */ 219 /* align the buffer on a 4-byte boundary */
221 skb_reserve(skb, offset); 220 skb_reserve(skb, offset);
222 memmove(skb->data, src, skb->len); 221 memmove(skb->data, src, skb->len);
222 tx_hdr = (struct tx_double_buffer_desc *) skb->data;
223 } else { 223 } else {
224 wl1251_info("No handler, fixme!"); 224 wl1251_info("No handler, fixme!");
225 return -EINVAL; 225 return -EINVAL;
@@ -237,8 +237,9 @@ static int wl1251_tx_send_packet(struct wl1251 *wl, struct sk_buff *skb,
237 237
238 wl1251_mem_write(wl, addr, skb->data, len); 238 wl1251_mem_write(wl, addr, skb->data, len);
239 239
240 wl1251_debug(DEBUG_TX, "tx id %u skb 0x%p payload %u rate 0x%x", 240 wl1251_debug(DEBUG_TX, "tx id %u skb 0x%p payload %u rate 0x%x "
241 tx_hdr->id, skb, tx_hdr->length, tx_hdr->rate); 241 "queue %d", tx_hdr->id, skb, tx_hdr->length,
242 tx_hdr->rate, tx_hdr->xmit_queue);
242 243
243 return 0; 244 return 0;
244} 245}
diff --git a/drivers/net/wireless/wl12xx/wl1251_tx.h b/drivers/net/wireless/wl12xx/wl1251_tx.h
index 7c1c1665c81..55856c6bb97 100644
--- a/drivers/net/wireless/wl12xx/wl1251_tx.h
+++ b/drivers/net/wireless/wl12xx/wl1251_tx.h
@@ -26,6 +26,7 @@
26#define __WL1251_TX_H__ 26#define __WL1251_TX_H__
27 27
28#include <linux/bitops.h> 28#include <linux/bitops.h>
29#include "wl1251_acx.h"
29 30
30/* 31/*
31 * 32 *
@@ -209,6 +210,22 @@ struct tx_result {
209 u8 done_2; 210 u8 done_2;
210} __attribute__ ((packed)); 211} __attribute__ ((packed));
211 212
213static inline int wl1251_tx_get_queue(int queue)
214{
215 switch (queue) {
216 case 0:
217 return QOS_AC_VO;
218 case 1:
219 return QOS_AC_VI;
220 case 2:
221 return QOS_AC_BE;
222 case 3:
223 return QOS_AC_BK;
224 default:
225 return QOS_AC_BE;
226 }
227}
228
212void wl1251_tx_work(struct work_struct *work); 229void wl1251_tx_work(struct work_struct *work);
213void wl1251_tx_complete(struct wl1251 *wl); 230void wl1251_tx_complete(struct wl1251 *wl);
214void wl1251_tx_flush(struct wl1251 *wl); 231void wl1251_tx_flush(struct wl1251 *wl);
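
Note: wl1251_tx_get_queue() maps mac80211's queue numbering (0 is the highest-priority queue) onto the firmware's access-category IDs; both call sites added in this patch use it the same way. A minimal sketch of that usage (QOS_AC_* come from wl1251_acx.h, which is now included above):

#include <linux/skbuff.h>
#include "wl1251_tx.h"

/* Sketch: choose the firmware xmit queue for an outgoing frame. */
static void wl1251_example_fill_queue(struct tx_double_buffer_desc *tx_hdr,
                                      struct sk_buff *skb)
{
        tx_hdr->xmit_queue = wl1251_tx_get_queue(skb_get_queue_mapping(skb));
}
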
diff --git a/drivers/net/wireless/wl12xx/wl1271.h b/drivers/net/wireless/wl12xx/wl1271.h
index 94359b1a861..d0938db043b 100644
--- a/drivers/net/wireless/wl12xx/wl1271.h
+++ b/drivers/net/wireless/wl12xx/wl1271.h
@@ -107,10 +107,9 @@ enum {
107 CFG_RX_CTL_EN | CFG_RX_BCN_EN | \ 107 CFG_RX_CTL_EN | CFG_RX_BCN_EN | \
108 CFG_RX_AUTH_EN | CFG_RX_ASSOC_EN) 108 CFG_RX_AUTH_EN | CFG_RX_ASSOC_EN)
109 109
110#define WL1271_DEFAULT_BASIC_RATE_SET (CONF_TX_RATE_MASK_ALL)
111
112#define WL1271_FW_NAME "wl1271-fw.bin" 110#define WL1271_FW_NAME "wl1271-fw.bin"
113#define WL1271_NVS_NAME "wl1271-nvs.bin" 111#define WL1271_NVS_NAME "wl1271-nvs.bin"
112#define WL1271_NVS_LEN 468
114 113
115/* 114/*
116 * Enable/disable 802.11a support for WL1273 115 * Enable/disable 802.11a support for WL1273
@@ -276,6 +275,7 @@ struct wl1271_debugfs {
276 275
277 struct dentry *retry_count; 276 struct dentry *retry_count;
278 struct dentry *excessive_retries; 277 struct dentry *excessive_retries;
278 struct dentry *gpio_power;
279}; 279};
280 280
281#define NUM_TX_QUEUES 4 281#define NUM_TX_QUEUES 4
@@ -322,6 +322,17 @@ struct wl1271 {
322 enum wl1271_state state; 322 enum wl1271_state state;
323 struct mutex mutex; 323 struct mutex mutex;
324 324
325#define WL1271_FLAG_STA_RATES_CHANGED (0)
326#define WL1271_FLAG_STA_ASSOCIATED (1)
327#define WL1271_FLAG_JOINED (2)
328#define WL1271_FLAG_GPIO_POWER (3)
329#define WL1271_FLAG_TX_QUEUE_STOPPED (4)
330#define WL1271_FLAG_SCANNING (5)
331#define WL1271_FLAG_IN_ELP (6)
332#define WL1271_FLAG_PSM (7)
333#define WL1271_FLAG_PSM_REQUESTED (8)
334 unsigned long flags;
335
325 struct wl1271_partition_set part; 336 struct wl1271_partition_set part;
326 337
327 struct wl1271_chip chip; 338 struct wl1271_chip chip;
@@ -359,7 +370,6 @@ struct wl1271 {
359 370
360 /* Frames scheduled for transmission, not handled yet */ 371 /* Frames scheduled for transmission, not handled yet */
361 struct sk_buff_head tx_queue; 372 struct sk_buff_head tx_queue;
362 bool tx_queue_stopped;
363 373
364 struct work_struct tx_work; 374 struct work_struct tx_work;
365 375
@@ -387,14 +397,15 @@ struct wl1271 {
387 u32 mbox_ptr[2]; 397 u32 mbox_ptr[2];
388 398
389 /* Are we currently scanning */ 399 /* Are we currently scanning */
390 bool scanning;
391 struct wl1271_scan scan; 400 struct wl1271_scan scan;
392 401
393 /* Our association ID */ 402 /* Our association ID */
394 u16 aid; 403 u16 aid;
395 404
396 /* currently configured rate set */ 405 /* currently configured rate set */
406 u32 sta_rate_set;
397 u32 basic_rate_set; 407 u32 basic_rate_set;
408 u32 rate_set;
398 409
399 /* The current band */ 410 /* The current band */
400 enum ieee80211_band band; 411 enum ieee80211_band band;
@@ -405,18 +416,9 @@ struct wl1271 {
405 unsigned int rx_config; 416 unsigned int rx_config;
406 unsigned int rx_filter; 417 unsigned int rx_filter;
407 418
408 /* is firmware in elp mode */
409 bool elp;
410
411 struct completion *elp_compl; 419 struct completion *elp_compl;
412 struct delayed_work elp_work; 420 struct delayed_work elp_work;
413 421
414 /* we can be in psm, but not in elp, we have to differentiate */
415 bool psm;
416
417 /* PSM mode requested */
418 bool psm_requested;
419
420 /* retry counter for PSM entries */ 422 /* retry counter for PSM entries */
421 u8 psm_entry_retry; 423 u8 psm_entry_retry;
422 424
@@ -435,9 +437,6 @@ struct wl1271 {
435 437
436 struct ieee80211_vif *vif; 438 struct ieee80211_vif *vif;
437 439
438 /* Used for a workaround to send disconnect before rejoining */
439 bool joined;
440
441 /* Current chipset configuration */ 440 /* Current chipset configuration */
442 struct conf_drv_settings conf; 441 struct conf_drv_settings conf;
443 442
@@ -455,7 +454,9 @@ int wl1271_plt_stop(struct wl1271 *wl);
455 454
456#define WL1271_TX_QUEUE_MAX_LENGTH 20 455#define WL1271_TX_QUEUE_MAX_LENGTH 20
457 456
458/* WL1271 needs a 200ms sleep after power on */ 457/* WL1271 needs a 200ms sleep after power on, and a 20ms sleep before power
 458 on in case it has been shut down shortly before */
 459#define WL1271_PRE_POWER_ON_SLEEP 20 /* in milliseconds */
 459#define WL1271_POWER_ON_SLEEP 200 /* in milliseconds */ 460#define WL1271_POWER_ON_SLEEP 200 /* in milliseconds */
460 461
461static inline bool wl1271_11a_enabled(void) 462static inline bool wl1271_11a_enabled(void)
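The wl1271.h hunks above replace several independent bool fields (scanning, elp, psm, psm_requested, joined, tx_queue_stopped) with a single unsigned long flags word plus WL1271_FLAG_* bit indices; the rest of the patch switches the users over to set_bit()/clear_bit()/test_bit(). The sketch below shows the same idea in plain userspace C with non-atomic helpers; the bit numbers are copied from the defines above, everything else is illustrative.

#include <stdio.h>

/* Bit indices copied from the WL1271_FLAG_* defines above. */
enum {
	FLAG_STA_ASSOCIATED = 1,
	FLAG_JOINED         = 2,
	FLAG_SCANNING       = 5,
	FLAG_PSM            = 7,
};

struct state {
	unsigned long flags;
};

/* Non-atomic stand-ins for the kernel's set_bit()/clear_bit()/test_bit(). */
static void flag_set(struct state *s, int bit)        { s->flags |=  1UL << bit; }
static void flag_clear(struct state *s, int bit)      { s->flags &= ~(1UL << bit); }
static int  flag_test(const struct state *s, int bit) { return !!(s->flags & (1UL << bit)); }

int main(void)
{
	struct state s = { 0 };

	flag_set(&s, FLAG_SCANNING);
	printf("scanning=%d psm=%d\n",
	       flag_test(&s, FLAG_SCANNING), flag_test(&s, FLAG_PSM));
	flag_clear(&s, FLAG_SCANNING);
	printf("scanning=%d\n", flag_test(&s, FLAG_SCANNING));
	return 0;
}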
diff --git a/drivers/net/wireless/wl12xx/wl1271_acx.c b/drivers/net/wireless/wl12xx/wl1271_acx.c
index 5cc89bbdac7..0b343484347 100644
--- a/drivers/net/wireless/wl12xx/wl1271_acx.c
+++ b/drivers/net/wireless/wl12xx/wl1271_acx.c
@@ -390,6 +390,35 @@ out:
390 return ret; 390 return ret;
391} 391}
392 392
393int wl1271_acx_dco_itrim_params(struct wl1271 *wl)
394{
395 struct acx_dco_itrim_params *dco;
396 struct conf_itrim_settings *c = &wl->conf.itrim;
397 int ret;
398
399 wl1271_debug(DEBUG_ACX, "acx dco itrim parameters");
400
401 dco = kzalloc(sizeof(*dco), GFP_KERNEL);
402 if (!dco) {
403 ret = -ENOMEM;
404 goto out;
405 }
406
407 dco->enable = c->enable;
408 dco->timeout = cpu_to_le32(c->timeout);
409
410 ret = wl1271_cmd_configure(wl, ACX_SET_DCO_ITRIM_PARAMS,
411 dco, sizeof(*dco));
412 if (ret < 0) {
413 wl1271_warning("failed to set dco itrim parameters: %d", ret);
414 goto out;
415 }
416
417out:
418 kfree(dco);
419 return ret;
420}
421
393int wl1271_acx_beacon_filter_opt(struct wl1271 *wl, bool enable_filter) 422int wl1271_acx_beacon_filter_opt(struct wl1271 *wl, bool enable_filter)
394{ 423{
395 struct acx_beacon_filter_option *beacon_filter = NULL; 424 struct acx_beacon_filter_option *beacon_filter = NULL;
@@ -758,10 +787,11 @@ int wl1271_acx_statistics(struct wl1271 *wl, struct acx_statistics *stats)
758 return 0; 787 return 0;
759} 788}
760 789
761int wl1271_acx_rate_policies(struct wl1271 *wl, u32 enabled_rates) 790int wl1271_acx_rate_policies(struct wl1271 *wl)
762{ 791{
763 struct acx_rate_policy *acx; 792 struct acx_rate_policy *acx;
764 struct conf_tx_rate_class *c = &wl->conf.tx.rc_conf; 793 struct conf_tx_rate_class *c = &wl->conf.tx.rc_conf;
794 int idx = 0;
765 int ret = 0; 795 int ret = 0;
766 796
767 wl1271_debug(DEBUG_ACX, "acx rate policies"); 797 wl1271_debug(DEBUG_ACX, "acx rate policies");
@@ -773,12 +803,21 @@ int wl1271_acx_rate_policies(struct wl1271 *wl, u32 enabled_rates)
773 goto out; 803 goto out;
774 } 804 }
775 805
776 /* configure one default (one-size-fits-all) rate class */ 806 /* configure one basic rate class */
777 acx->rate_class_cnt = cpu_to_le32(1); 807 idx = ACX_TX_BASIC_RATE;
778 acx->rate_class[0].enabled_rates = cpu_to_le32(enabled_rates); 808 acx->rate_class[idx].enabled_rates = cpu_to_le32(wl->basic_rate_set);
779 acx->rate_class[0].short_retry_limit = c->short_retry_limit; 809 acx->rate_class[idx].short_retry_limit = c->short_retry_limit;
780 acx->rate_class[0].long_retry_limit = c->long_retry_limit; 810 acx->rate_class[idx].long_retry_limit = c->long_retry_limit;
781 acx->rate_class[0].aflags = c->aflags; 811 acx->rate_class[idx].aflags = c->aflags;
812
813 /* configure one AP supported rate class */
814 idx = ACX_TX_AP_FULL_RATE;
815 acx->rate_class[idx].enabled_rates = cpu_to_le32(wl->rate_set);
816 acx->rate_class[idx].short_retry_limit = c->short_retry_limit;
817 acx->rate_class[idx].long_retry_limit = c->long_retry_limit;
818 acx->rate_class[idx].aflags = c->aflags;
819
820 acx->rate_class_cnt = cpu_to_le32(ACX_TX_RATE_POLICY_CNT);
782 821
783 ret = wl1271_cmd_configure(wl, ACX_RATE_POLICY, acx, sizeof(*acx)); 822 ret = wl1271_cmd_configure(wl, ACX_RATE_POLICY, acx, sizeof(*acx));
784 if (ret < 0) { 823 if (ret < 0) {
@@ -1012,59 +1051,6 @@ out:
1012 return ret; 1051 return ret;
1013} 1052}
1014 1053
1015int wl1271_acx_smart_reflex(struct wl1271 *wl)
1016{
1017 struct acx_smart_reflex_state *sr_state = NULL;
1018 struct acx_smart_reflex_config_params *sr_param = NULL;
1019 int i, ret;
1020
1021 wl1271_debug(DEBUG_ACX, "acx smart reflex");
1022
1023 sr_param = kzalloc(sizeof(*sr_param), GFP_KERNEL);
1024 if (!sr_param) {
1025 ret = -ENOMEM;
1026 goto out;
1027 }
1028
1029 for (i = 0; i < CONF_SR_ERR_TBL_COUNT; i++) {
1030 struct conf_mart_reflex_err_table *e =
1031 &(wl->conf.init.sr_err_tbl[i]);
1032
1033 sr_param->error_table[i].len = e->len;
1034 sr_param->error_table[i].upper_limit = e->upper_limit;
1035 memcpy(sr_param->error_table[i].values, e->values, e->len);
1036 }
1037
1038 ret = wl1271_cmd_configure(wl, ACX_SET_SMART_REFLEX_PARAMS,
1039 sr_param, sizeof(*sr_param));
1040 if (ret < 0) {
1041 wl1271_warning("failed to set smart reflex params: %d", ret);
1042 goto out;
1043 }
1044
1045 sr_state = kzalloc(sizeof(*sr_state), GFP_KERNEL);
1046 if (!sr_state) {
1047 ret = -ENOMEM;
1048 goto out;
1049 }
1050
1051 /* enable smart reflex */
1052 sr_state->enable = wl->conf.init.sr_enable;
1053
1054 ret = wl1271_cmd_configure(wl, ACX_SET_SMART_REFLEX_STATE,
1055 sr_state, sizeof(*sr_state));
1056 if (ret < 0) {
1057 wl1271_warning("failed to set smart reflex params: %d", ret);
1058 goto out;
1059 }
1060
1061out:
1062 kfree(sr_state);
1063 kfree(sr_param);
1064 return ret;
1065
1066}
1067
1068int wl1271_acx_bet_enable(struct wl1271 *wl, bool enable) 1054int wl1271_acx_bet_enable(struct wl1271 *wl, bool enable)
1069{ 1055{
1070 struct wl1271_acx_bet_enable *acx = NULL; 1056 struct wl1271_acx_bet_enable *acx = NULL;
@@ -1132,3 +1118,31 @@ out:
1132 kfree(acx); 1118 kfree(acx);
1133 return ret; 1119 return ret;
1134} 1120}
1121
1122int wl1271_acx_pm_config(struct wl1271 *wl)
1123{
1124 struct wl1271_acx_pm_config *acx = NULL;
1125 struct conf_pm_config_settings *c = &wl->conf.pm_config;
1126 int ret = 0;
1127
1128 wl1271_debug(DEBUG_ACX, "acx pm config");
1129
1130 acx = kzalloc(sizeof(*acx), GFP_KERNEL);
1131 if (!acx) {
1132 ret = -ENOMEM;
1133 goto out;
1134 }
1135
1136 acx->host_clk_settling_time = cpu_to_le32(c->host_clk_settling_time);
1137 acx->host_fast_wakeup_support = c->host_fast_wakeup_support;
1138
1139 ret = wl1271_cmd_configure(wl, ACX_PM_CONFIG, acx, sizeof(*acx));
1140 if (ret < 0) {
1141 wl1271_warning("acx pm config failed: %d", ret);
1142 goto out;
1143 }
1144
1145out:
1146 kfree(acx);
1147 return ret;
1148}
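Both new ACX helpers (wl1271_acx_dco_itrim_params and wl1271_acx_pm_config) follow the driver's usual shape: zero-allocate the command struct, copy values from the matching conf block (with cpu_to_le32 for multi-byte fields), hand it to wl1271_cmd_configure, and free it under a single out: label. A hedged standalone sketch of that shape, with stub types and a stub transport function in place of the driver's own, looks like this:

#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Illustrative stand-in for the driver's command struct and transport;
 * none of these names are the driver's own. */
struct pm_config_cmd {
	uint32_t host_clk_settling_time;	/* little-endian on the wire */
	uint8_t  host_fast_wakeup_support;
	uint8_t  padding[3];
};

static int send_configure(const void *buf, size_t len)
{
	/* stub for wl1271_cmd_configure(): pretend the firmware accepted it */
	printf("sent %zu byte command\n", len);
	return 0;
}

static int pm_config(uint32_t settling_time_us, int fast_wakeup)
{
	struct pm_config_cmd *cmd;
	int ret = 0;

	cmd = calloc(1, sizeof(*cmd));		/* kzalloc() analogue */
	if (!cmd) {
		ret = -ENOMEM;
		goto out;
	}

	cmd->host_clk_settling_time = settling_time_us;	/* cpu_to_le32() in the driver */
	cmd->host_fast_wakeup_support = fast_wakeup ? 1 : 0;

	ret = send_configure(cmd, sizeof(*cmd));
	if (ret < 0)
		fprintf(stderr, "pm config failed: %d\n", ret);

out:
	free(cmd);	/* free(NULL) is a no-op, like kfree(NULL) */
	return ret;
}

int main(void)
{
	return pm_config(5000, 0) < 0;
}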
diff --git a/drivers/net/wireless/wl12xx/wl1271_acx.h b/drivers/net/wireless/wl12xx/wl1271_acx.h
index 2ce0a812854..1bb63af64f0 100644
--- a/drivers/net/wireless/wl12xx/wl1271_acx.h
+++ b/drivers/net/wireless/wl12xx/wl1271_acx.h
@@ -415,23 +415,12 @@ struct acx_bt_wlan_coex {
415 u8 pad[3]; 415 u8 pad[3];
416} __attribute__ ((packed)); 416} __attribute__ ((packed));
417 417
418struct acx_smart_reflex_state { 418struct acx_dco_itrim_params {
419 struct acx_header header; 419 struct acx_header header;
420 420
421 u8 enable; 421 u8 enable;
422 u8 padding[3]; 422 u8 padding[3];
423} __attribute__ ((packed)); 423 __le32 timeout;
424
425struct smart_reflex_err_table {
426 u8 len;
427 s8 upper_limit;
428 s8 values[14];
429} __attribute__ ((packed));
430
431struct acx_smart_reflex_config_params {
432 struct acx_header header;
433
434 struct smart_reflex_err_table error_table[3];
435} __attribute__ ((packed)); 424} __attribute__ ((packed));
436 425
437#define PTA_ANTENNA_TYPE_DEF (0) 426#define PTA_ANTENNA_TYPE_DEF (0)
@@ -837,6 +826,9 @@ struct acx_rate_class {
837 u8 reserved; 826 u8 reserved;
838}; 827};
839 828
829#define ACX_TX_BASIC_RATE 0
830#define ACX_TX_AP_FULL_RATE 1
831#define ACX_TX_RATE_POLICY_CNT 2
840struct acx_rate_policy { 832struct acx_rate_policy {
841 struct acx_header header; 833 struct acx_header header;
842 834
@@ -877,8 +869,8 @@ struct acx_tx_config_options {
877 __le16 tx_compl_threshold; /* number of packets */ 869 __le16 tx_compl_threshold; /* number of packets */
878} __attribute__ ((packed)); 870} __attribute__ ((packed));
879 871
880#define ACX_RX_MEM_BLOCKS 64 872#define ACX_RX_MEM_BLOCKS 70
881#define ACX_TX_MIN_MEM_BLOCKS 64 873#define ACX_TX_MIN_MEM_BLOCKS 40
882#define ACX_TX_DESCRIPTORS 32 874#define ACX_TX_DESCRIPTORS 32
883#define ACX_NUM_SSID_PROFILES 1 875#define ACX_NUM_SSID_PROFILES 1
884 876
@@ -969,6 +961,13 @@ struct wl1271_acx_arp_filter {
969 used. */ 961 used. */
970} __attribute__((packed)); 962} __attribute__((packed));
971 963
964struct wl1271_acx_pm_config {
965 struct acx_header header;
966
967 __le32 host_clk_settling_time;
968 u8 host_fast_wakeup_support;
969 u8 padding[3];
970} __attribute__ ((packed));
972 971
973enum { 972enum {
974 ACX_WAKE_UP_CONDITIONS = 0x0002, 973 ACX_WAKE_UP_CONDITIONS = 0x0002,
@@ -1027,13 +1026,13 @@ enum {
1027 ACX_HT_BSS_OPERATION = 0x0058, 1026 ACX_HT_BSS_OPERATION = 0x0058,
1028 ACX_COEX_ACTIVITY = 0x0059, 1027 ACX_COEX_ACTIVITY = 0x0059,
1029 ACX_SET_SMART_REFLEX_DEBUG = 0x005A, 1028 ACX_SET_SMART_REFLEX_DEBUG = 0x005A,
1030 ACX_SET_SMART_REFLEX_STATE = 0x005B, 1029 ACX_SET_DCO_ITRIM_PARAMS = 0x0061,
1031 ACX_SET_SMART_REFLEX_PARAMS = 0x005F,
1032 DOT11_RX_MSDU_LIFE_TIME = 0x1004, 1030 DOT11_RX_MSDU_LIFE_TIME = 0x1004,
1033 DOT11_CUR_TX_PWR = 0x100D, 1031 DOT11_CUR_TX_PWR = 0x100D,
1034 DOT11_RX_DOT11_MODE = 0x1012, 1032 DOT11_RX_DOT11_MODE = 0x1012,
1035 DOT11_RTS_THRESHOLD = 0x1013, 1033 DOT11_RTS_THRESHOLD = 0x1013,
1036 DOT11_GROUP_ADDRESS_TBL = 0x1014, 1034 DOT11_GROUP_ADDRESS_TBL = 0x1014,
1035 ACX_PM_CONFIG = 0x1016,
1037 1036
1038 MAX_DOT11_IE = DOT11_GROUP_ADDRESS_TBL, 1037 MAX_DOT11_IE = DOT11_GROUP_ADDRESS_TBL,
1039 1038
@@ -1056,6 +1055,7 @@ int wl1271_acx_group_address_tbl(struct wl1271 *wl, bool enable,
1056 void *mc_list, u32 mc_list_len); 1055 void *mc_list, u32 mc_list_len);
1057int wl1271_acx_service_period_timeout(struct wl1271 *wl); 1056int wl1271_acx_service_period_timeout(struct wl1271 *wl);
1058int wl1271_acx_rts_threshold(struct wl1271 *wl, u16 rts_threshold); 1057int wl1271_acx_rts_threshold(struct wl1271 *wl, u16 rts_threshold);
1058int wl1271_acx_dco_itrim_params(struct wl1271 *wl);
1059int wl1271_acx_beacon_filter_opt(struct wl1271 *wl, bool enable_filter); 1059int wl1271_acx_beacon_filter_opt(struct wl1271 *wl, bool enable_filter);
1060int wl1271_acx_beacon_filter_table(struct wl1271 *wl); 1060int wl1271_acx_beacon_filter_table(struct wl1271 *wl);
1061int wl1271_acx_conn_monit_params(struct wl1271 *wl); 1061int wl1271_acx_conn_monit_params(struct wl1271 *wl);
@@ -1069,7 +1069,7 @@ int wl1271_acx_set_preamble(struct wl1271 *wl, enum acx_preamble_type preamble);
1069int wl1271_acx_cts_protect(struct wl1271 *wl, 1069int wl1271_acx_cts_protect(struct wl1271 *wl,
1070 enum acx_ctsprotect_type ctsprotect); 1070 enum acx_ctsprotect_type ctsprotect);
1071int wl1271_acx_statistics(struct wl1271 *wl, struct acx_statistics *stats); 1071int wl1271_acx_statistics(struct wl1271 *wl, struct acx_statistics *stats);
1072int wl1271_acx_rate_policies(struct wl1271 *wl, u32 enabled_rates); 1072int wl1271_acx_rate_policies(struct wl1271 *wl);
1073int wl1271_acx_ac_cfg(struct wl1271 *wl); 1073int wl1271_acx_ac_cfg(struct wl1271 *wl);
1074int wl1271_acx_tid_cfg(struct wl1271 *wl); 1074int wl1271_acx_tid_cfg(struct wl1271 *wl);
1075int wl1271_acx_frag_threshold(struct wl1271 *wl); 1075int wl1271_acx_frag_threshold(struct wl1271 *wl);
@@ -1081,5 +1081,6 @@ int wl1271_acx_smart_reflex(struct wl1271 *wl);
1081int wl1271_acx_bet_enable(struct wl1271 *wl, bool enable); 1081int wl1271_acx_bet_enable(struct wl1271 *wl, bool enable);
1082int wl1271_acx_arp_ip_filter(struct wl1271 *wl, bool enable, u8 *address, 1082int wl1271_acx_arp_ip_filter(struct wl1271 *wl, bool enable, u8 *address,
1083 u8 version); 1083 u8 version);
1084int wl1271_acx_pm_config(struct wl1271 *wl);
1084 1085
1085#endif /* __WL1271_ACX_H__ */ 1086#endif /* __WL1271_ACX_H__ */
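The command structures above are declared __attribute__ ((packed)) with explicit padding bytes and __le32 fields, so the in-memory layout matches the firmware's expected little-endian byte layout on any host. The sketch below illustrates the same layout discipline in userspace, using glibc's htole32() as a stand-in for the kernel's cpu_to_le32(); the struct name is illustrative, and the 50000 us timeout is the default the patch adds in wl1271_main.c.

#define _DEFAULT_SOURCE
#include <endian.h>	/* htole32(): userspace analogue of cpu_to_le32() */
#include <stdint.h>
#include <stdio.h>

/* Illustrative command layout in the style of the acx structs above:
 * packed, explicitly padded, little-endian multi-byte fields. */
struct itrim_cmd {
	uint8_t  enable;
	uint8_t  padding[3];
	uint32_t timeout_le;	/* stored little-endian */
} __attribute__((packed));

int main(void)
{
	struct itrim_cmd cmd = {
		.enable     = 1,
		.timeout_le = htole32(50000),	/* default itrim timeout from wl1271_main.c */
	};
	const unsigned char *p = (const unsigned char *)&cmd;

	printf("sizeof(cmd) = %zu, bytes:", sizeof(cmd));
	for (size_t i = 0; i < sizeof(cmd); i++)
		printf(" %02x", p[i]);
	printf("\n");
	return 0;
}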
diff --git a/drivers/net/wireless/wl12xx/wl1271_boot.c b/drivers/net/wireless/wl12xx/wl1271_boot.c
index b7c96454cca..e803b876f3f 100644
--- a/drivers/net/wireless/wl12xx/wl1271_boot.c
+++ b/drivers/net/wireless/wl12xx/wl1271_boot.c
@@ -225,9 +225,15 @@ static int wl1271_boot_upload_nvs(struct wl1271 *wl)
225 if (nvs == NULL) 225 if (nvs == NULL)
226 return -ENODEV; 226 return -ENODEV;
227 227
228 if (wl->nvs_len < WL1271_NVS_LEN)
229 return -EINVAL;
230
228 nvs_ptr = nvs; 231 nvs_ptr = nvs;
229 232
230 nvs_len = wl->nvs_len; 233 /* only the first part of the NVS needs to be uploaded */
234 nvs_len = WL1271_NVS_LEN;
235
236 /* FIXME: read init settings from the remaining part of the NVS */
231 237
232 /* Update the device MAC address into the nvs */ 238 /* Update the device MAC address into the nvs */
233 nvs[11] = wl->mac_addr[0]; 239 nvs[11] = wl->mac_addr[0];
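The boot change validates that the NVS image is at least WL1271_NVS_LEN (468) bytes, uploads only that first part, and patches the device MAC address into the buffer before upload. The quoted hunk only shows the first MAC byte landing at offset 11, so the six-consecutive-bytes layout in the sketch below is an assumption for illustration; the length check and the "upload only the first part" rule follow the patch.

#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define NVS_UPLOAD_LEN 468	/* mirrors WL1271_NVS_LEN */

/* Validate the NVS image and patch the MAC address into it before upload.
 * Only the first byte is shown landing at offset 11 in the hunk above, so
 * the "six consecutive bytes from offset 11" layout here is an assumption
 * made for illustration. */
static int prepare_nvs(uint8_t *nvs, size_t nvs_len, const uint8_t mac[6])
{
	if (!nvs)
		return -ENODEV;
	if (nvs_len < NVS_UPLOAD_LEN)
		return -EINVAL;		/* image too short for the upload part */

	memcpy(&nvs[11], mac, 6);	/* assumed layout, see note above */
	return NVS_UPLOAD_LEN;		/* only this much gets uploaded */
}

int main(void)
{
	uint8_t nvs[1024] = { 0 };
	const uint8_t mac[6] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };
	int len = prepare_nvs(nvs, sizeof(nvs), mac);

	printf("upload length: %d\n", len);
	return len < 0;
}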
diff --git a/drivers/net/wireless/wl12xx/wl1271_cmd.c b/drivers/net/wireless/wl12xx/wl1271_cmd.c
index c3385b3d246..a74259bb596 100644
--- a/drivers/net/wireless/wl12xx/wl1271_cmd.c
+++ b/drivers/net/wireless/wl12xx/wl1271_cmd.c
@@ -209,6 +209,26 @@ int wl1271_cmd_general_parms(struct wl1271 *wl)
209 gen_parms->tx_bip_fem_manufacturer = g->tx_bip_fem_manufacturer; 209 gen_parms->tx_bip_fem_manufacturer = g->tx_bip_fem_manufacturer;
210 gen_parms->settings = g->settings; 210 gen_parms->settings = g->settings;
211 211
212 gen_parms->sr_state = g->sr_state;
213
214 memcpy(gen_parms->srf1,
215 g->srf1,
216 CONF_MAX_SMART_REFLEX_PARAMS);
217 memcpy(gen_parms->srf2,
218 g->srf2,
219 CONF_MAX_SMART_REFLEX_PARAMS);
220 memcpy(gen_parms->srf3,
221 g->srf3,
222 CONF_MAX_SMART_REFLEX_PARAMS);
223 memcpy(gen_parms->sr_debug_table,
224 g->sr_debug_table,
225 CONF_MAX_SMART_REFLEX_PARAMS);
226
227 gen_parms->sr_sen_n_p = g->sr_sen_n_p;
228 gen_parms->sr_sen_n_p_gain = g->sr_sen_n_p_gain;
229 gen_parms->sr_sen_nrn = g->sr_sen_nrn;
230 gen_parms->sr_sen_prn = g->sr_sen_prn;
231
212 ret = wl1271_cmd_test(wl, gen_parms, sizeof(*gen_parms), 0); 232 ret = wl1271_cmd_test(wl, gen_parms, sizeof(*gen_parms), 0);
213 if (ret < 0) 233 if (ret < 0)
214 wl1271_warning("CMD_INI_FILE_GENERAL_PARAM failed"); 234 wl1271_warning("CMD_INI_FILE_GENERAL_PARAM failed");
@@ -253,6 +273,8 @@ int wl1271_cmd_radio_parms(struct wl1271 *wl)
253 CONF_NUMBER_OF_RATE_GROUPS); 273 CONF_NUMBER_OF_RATE_GROUPS);
254 memcpy(radio_parms->tx_rate_limits_degraded, r->tx_rate_limits_degraded, 274 memcpy(radio_parms->tx_rate_limits_degraded, r->tx_rate_limits_degraded,
255 CONF_NUMBER_OF_RATE_GROUPS); 275 CONF_NUMBER_OF_RATE_GROUPS);
276 memcpy(radio_parms->tx_rate_limits_extreme, r->tx_rate_limits_extreme,
277 CONF_NUMBER_OF_RATE_GROUPS);
256 278
257 memcpy(radio_parms->tx_channel_limits_11b, r->tx_channel_limits_11b, 279 memcpy(radio_parms->tx_channel_limits_11b, r->tx_channel_limits_11b,
258 CONF_NUMBER_OF_CHANNELS_2_4); 280 CONF_NUMBER_OF_CHANNELS_2_4);
@@ -263,6 +285,11 @@ int wl1271_cmd_radio_parms(struct wl1271 *wl)
263 memcpy(radio_parms->tx_ibias, r->tx_ibias, CONF_NUMBER_OF_RATE_GROUPS); 285 memcpy(radio_parms->tx_ibias, r->tx_ibias, CONF_NUMBER_OF_RATE_GROUPS);
264 286
265 radio_parms->rx_fem_insertion_loss = r->rx_fem_insertion_loss; 287 radio_parms->rx_fem_insertion_loss = r->rx_fem_insertion_loss;
288 radio_parms->degraded_low_to_normal_threshold =
289 r->degraded_low_to_normal_threshold;
290 radio_parms->degraded_normal_to_high_threshold =
291 r->degraded_normal_to_high_threshold;
292
266 293
267 for (i = 0; i < CONF_NUMBER_OF_SUB_BANDS_5; i++) 294 for (i = 0; i < CONF_NUMBER_OF_SUB_BANDS_5; i++)
268 radio_parms->tx_ref_pd_voltage_5[i] = 295 radio_parms->tx_ref_pd_voltage_5[i] =
@@ -275,6 +302,8 @@ int wl1271_cmd_radio_parms(struct wl1271 *wl)
275 r->tx_rate_limits_normal_5, CONF_NUMBER_OF_RATE_GROUPS); 302 r->tx_rate_limits_normal_5, CONF_NUMBER_OF_RATE_GROUPS);
276 memcpy(radio_parms->tx_rate_limits_degraded_5, 303 memcpy(radio_parms->tx_rate_limits_degraded_5,
277 r->tx_rate_limits_degraded_5, CONF_NUMBER_OF_RATE_GROUPS); 304 r->tx_rate_limits_degraded_5, CONF_NUMBER_OF_RATE_GROUPS);
305 memcpy(radio_parms->tx_rate_limits_extreme_5,
306 r->tx_rate_limits_extreme_5, CONF_NUMBER_OF_RATE_GROUPS);
278 memcpy(radio_parms->tx_channel_limits_ofdm_5, 307 memcpy(radio_parms->tx_channel_limits_ofdm_5,
279 r->tx_channel_limits_ofdm_5, CONF_NUMBER_OF_CHANNELS_5); 308 r->tx_channel_limits_ofdm_5, CONF_NUMBER_OF_CHANNELS_5);
280 memcpy(radio_parms->tx_pdv_rate_offsets_5, r->tx_pdv_rate_offsets_5, 309 memcpy(radio_parms->tx_pdv_rate_offsets_5, r->tx_pdv_rate_offsets_5,
@@ -283,6 +312,10 @@ int wl1271_cmd_radio_parms(struct wl1271 *wl)
283 CONF_NUMBER_OF_RATE_GROUPS); 312 CONF_NUMBER_OF_RATE_GROUPS);
284 memcpy(radio_parms->rx_fem_insertion_loss_5, 313 memcpy(radio_parms->rx_fem_insertion_loss_5,
285 r->rx_fem_insertion_loss_5, CONF_NUMBER_OF_SUB_BANDS_5); 314 r->rx_fem_insertion_loss_5, CONF_NUMBER_OF_SUB_BANDS_5);
315 radio_parms->degraded_low_to_normal_threshold_5 =
316 r->degraded_low_to_normal_threshold_5;
317 radio_parms->degraded_normal_to_high_threshold_5 =
318 r->degraded_normal_to_high_threshold_5;
286 319
287 wl1271_dump(DEBUG_CMD, "TEST_CMD_INI_FILE_RADIO_PARAM: ", 320 wl1271_dump(DEBUG_CMD, "TEST_CMD_INI_FILE_RADIO_PARAM: ",
288 radio_parms, sizeof(*radio_parms)); 321 radio_parms, sizeof(*radio_parms));
@@ -311,19 +344,6 @@ int wl1271_cmd_join(struct wl1271 *wl)
311 do_cal = false; 344 do_cal = false;
312 } 345 }
313 346
314 /* FIXME: This is a workaround, because with the current stack, we
315 * cannot know when we have disassociated. So, if we have already
316 * joined, we disconnect before joining again. */
317 if (wl->joined) {
318 ret = wl1271_cmd_disconnect(wl);
319 if (ret < 0) {
320 wl1271_error("failed to disconnect before rejoining");
321 goto out;
322 }
323
324 wl->joined = false;
325 }
326
327 join = kzalloc(sizeof(*join), GFP_KERNEL); 347 join = kzalloc(sizeof(*join), GFP_KERNEL);
328 if (!join) { 348 if (!join) {
329 ret = -ENOMEM; 349 ret = -ENOMEM;
@@ -388,8 +408,6 @@ int wl1271_cmd_join(struct wl1271 *wl)
388 goto out_free; 408 goto out_free;
389 } 409 }
390 410
391 wl->joined = true;
392
393 /* 411 /*
394 * ugly hack: we should wait for JOIN_EVENT_COMPLETE_ID but to 412 * ugly hack: we should wait for JOIN_EVENT_COMPLETE_ID but to
395 * simplify locking we just sleep instead, for now 413 * simplify locking we just sleep instead, for now
@@ -487,7 +505,7 @@ int wl1271_cmd_configure(struct wl1271 *wl, u16 id, void *buf, size_t len)
487 return 0; 505 return 0;
488} 506}
489 507
490int wl1271_cmd_data_path(struct wl1271 *wl, u8 channel, bool enable) 508int wl1271_cmd_data_path(struct wl1271 *wl, bool enable)
491{ 509{
492 struct cmd_enabledisable_path *cmd; 510 struct cmd_enabledisable_path *cmd;
493 int ret; 511 int ret;
@@ -501,7 +519,8 @@ int wl1271_cmd_data_path(struct wl1271 *wl, u8 channel, bool enable)
501 goto out; 519 goto out;
502 } 520 }
503 521
504 cmd->channel = channel; 522 /* the channel here is only used for calibration, so hardcoded to 1 */
523 cmd->channel = 1;
505 524
506 if (enable) { 525 if (enable) {
507 cmd_rx = CMD_ENABLE_RX; 526 cmd_rx = CMD_ENABLE_RX;
@@ -514,22 +533,22 @@ int wl1271_cmd_data_path(struct wl1271 *wl, u8 channel, bool enable)
514 ret = wl1271_cmd_send(wl, cmd_rx, cmd, sizeof(*cmd), 0); 533 ret = wl1271_cmd_send(wl, cmd_rx, cmd, sizeof(*cmd), 0);
515 if (ret < 0) { 534 if (ret < 0) {
516 wl1271_error("rx %s cmd for channel %d failed", 535 wl1271_error("rx %s cmd for channel %d failed",
517 enable ? "start" : "stop", channel); 536 enable ? "start" : "stop", cmd->channel);
518 goto out; 537 goto out;
519 } 538 }
520 539
521 wl1271_debug(DEBUG_BOOT, "rx %s cmd channel %d", 540 wl1271_debug(DEBUG_BOOT, "rx %s cmd channel %d",
522 enable ? "start" : "stop", channel); 541 enable ? "start" : "stop", cmd->channel);
523 542
524 ret = wl1271_cmd_send(wl, cmd_tx, cmd, sizeof(*cmd), 0); 543 ret = wl1271_cmd_send(wl, cmd_tx, cmd, sizeof(*cmd), 0);
525 if (ret < 0) { 544 if (ret < 0) {
526 wl1271_error("tx %s cmd for channel %d failed", 545 wl1271_error("tx %s cmd for channel %d failed",
527 enable ? "start" : "stop", channel); 546 enable ? "start" : "stop", cmd->channel);
528 return ret; 547 return ret;
529 } 548 }
530 549
531 wl1271_debug(DEBUG_BOOT, "tx %s cmd channel %d", 550 wl1271_debug(DEBUG_BOOT, "tx %s cmd channel %d",
532 enable ? "start" : "stop", channel); 551 enable ? "start" : "stop", cmd->channel);
533 552
534out: 553out:
535 kfree(cmd); 554 kfree(cmd);
@@ -636,7 +655,7 @@ int wl1271_cmd_scan(struct wl1271 *wl, u8 *ssid, size_t len,
636 channels = wl->hw->wiphy->bands[ieee_band]->channels; 655 channels = wl->hw->wiphy->bands[ieee_band]->channels;
637 n_ch = wl->hw->wiphy->bands[ieee_band]->n_channels; 656 n_ch = wl->hw->wiphy->bands[ieee_band]->n_channels;
638 657
639 if (wl->scanning) 658 if (test_bit(WL1271_FLAG_SCANNING, &wl->flags))
640 return -EINVAL; 659 return -EINVAL;
641 660
642 params = kzalloc(sizeof(*params), GFP_KERNEL); 661 params = kzalloc(sizeof(*params), GFP_KERNEL);
@@ -711,7 +730,7 @@ int wl1271_cmd_scan(struct wl1271 *wl, u8 *ssid, size_t len,
711 730
712 wl1271_dump(DEBUG_SCAN, "SCAN: ", params, sizeof(*params)); 731 wl1271_dump(DEBUG_SCAN, "SCAN: ", params, sizeof(*params));
713 732
714 wl->scanning = true; 733 set_bit(WL1271_FLAG_SCANNING, &wl->flags);
715 if (wl1271_11a_enabled()) { 734 if (wl1271_11a_enabled()) {
716 wl->scan.state = band; 735 wl->scan.state = band;
717 if (band == WL1271_SCAN_BAND_DUAL) { 736 if (band == WL1271_SCAN_BAND_DUAL) {
@@ -729,7 +748,7 @@ int wl1271_cmd_scan(struct wl1271 *wl, u8 *ssid, size_t len,
729 ret = wl1271_cmd_send(wl, CMD_SCAN, params, sizeof(*params), 0); 748 ret = wl1271_cmd_send(wl, CMD_SCAN, params, sizeof(*params), 0);
730 if (ret < 0) { 749 if (ret < 0) {
731 wl1271_error("SCAN failed"); 750 wl1271_error("SCAN failed");
732 wl->scanning = false; 751 clear_bit(WL1271_FLAG_SCANNING, &wl->flags);
733 goto out; 752 goto out;
734 } 753 }
735 754
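wl1271_cmd_scan now guards against overlapping scans through the WL1271_FLAG_SCANNING bit: it bails out with -EINVAL if the bit is already set, sets it before issuing CMD_SCAN, and clears it again if the command fails (the scan-complete event clears it on success). A simplified, non-atomic sketch of that guard:

#include <errno.h>
#include <stdio.h>

static int scanning;	/* plain stand-in for the WL1271_FLAG_SCANNING bit */

static int send_scan_cmd(int fail)
{
	return fail ? -EIO : 0;	/* stand-in for wl1271_cmd_send(CMD_SCAN, ...) */
}

/* Refuse overlapping scans, mark the scan as running before sending the
 * command, roll the flag back if the command fails; the flag otherwise
 * stays set until the scan-complete event clears it. */
static int start_scan(int make_it_fail)
{
	int ret;

	if (scanning)
		return -EINVAL;		/* a scan is already in flight */

	scanning = 1;
	ret = send_scan_cmd(make_it_fail);
	if (ret < 0) {
		scanning = 0;		/* undo the flag on failure */
		return ret;
	}
	return 0;
}

int main(void)
{
	printf("first scan:  %d\n", start_scan(0));
	printf("second scan: %d\n", start_scan(0));	/* rejected while one is in flight */
	return 0;
}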
diff --git a/drivers/net/wireless/wl12xx/wl1271_cmd.h b/drivers/net/wireless/wl12xx/wl1271_cmd.h
index b4fa4acb922..09fe91297ac 100644
--- a/drivers/net/wireless/wl12xx/wl1271_cmd.h
+++ b/drivers/net/wireless/wl12xx/wl1271_cmd.h
@@ -37,7 +37,7 @@ int wl1271_cmd_join(struct wl1271 *wl);
37int wl1271_cmd_test(struct wl1271 *wl, void *buf, size_t buf_len, u8 answer); 37int wl1271_cmd_test(struct wl1271 *wl, void *buf, size_t buf_len, u8 answer);
38int wl1271_cmd_interrogate(struct wl1271 *wl, u16 id, void *buf, size_t len); 38int wl1271_cmd_interrogate(struct wl1271 *wl, u16 id, void *buf, size_t len);
39int wl1271_cmd_configure(struct wl1271 *wl, u16 id, void *buf, size_t len); 39int wl1271_cmd_configure(struct wl1271 *wl, u16 id, void *buf, size_t len);
40int wl1271_cmd_data_path(struct wl1271 *wl, u8 channel, bool enable); 40int wl1271_cmd_data_path(struct wl1271 *wl, bool enable);
41int wl1271_cmd_ps_mode(struct wl1271 *wl, u8 ps_mode); 41int wl1271_cmd_ps_mode(struct wl1271 *wl, u8 ps_mode);
42int wl1271_cmd_read_memory(struct wl1271 *wl, u32 addr, void *answer, 42int wl1271_cmd_read_memory(struct wl1271 *wl, u32 addr, void *answer,
43 size_t len); 43 size_t len);
@@ -437,6 +437,21 @@ struct wl1271_general_parms_cmd {
437 u8 tx_bip_fem_autodetect; 437 u8 tx_bip_fem_autodetect;
438 u8 tx_bip_fem_manufacturer; 438 u8 tx_bip_fem_manufacturer;
439 u8 settings; 439 u8 settings;
440
441 u8 sr_state;
442
443 s8 srf1[CONF_MAX_SMART_REFLEX_PARAMS];
444 s8 srf2[CONF_MAX_SMART_REFLEX_PARAMS];
445 s8 srf3[CONF_MAX_SMART_REFLEX_PARAMS];
446
447 s8 sr_debug_table[CONF_MAX_SMART_REFLEX_PARAMS];
448
449 u8 sr_sen_n_p;
450 u8 sr_sen_n_p_gain;
451 u8 sr_sen_nrn;
452 u8 sr_sen_prn;
453
454 u8 padding[3];
440} __attribute__ ((packed)); 455} __attribute__ ((packed));
441 456
442struct wl1271_radio_parms_cmd { 457struct wl1271_radio_parms_cmd {
@@ -458,11 +473,12 @@ struct wl1271_radio_parms_cmd {
458 /* Dynamic radio parameters */ 473 /* Dynamic radio parameters */
459 /* 2.4GHz */ 474 /* 2.4GHz */
460 __le16 tx_ref_pd_voltage; 475 __le16 tx_ref_pd_voltage;
461 s8 tx_ref_power; 476 u8 tx_ref_power;
462 s8 tx_offset_db; 477 s8 tx_offset_db;
463 478
464 s8 tx_rate_limits_normal[CONF_NUMBER_OF_RATE_GROUPS]; 479 s8 tx_rate_limits_normal[CONF_NUMBER_OF_RATE_GROUPS];
465 s8 tx_rate_limits_degraded[CONF_NUMBER_OF_RATE_GROUPS]; 480 s8 tx_rate_limits_degraded[CONF_NUMBER_OF_RATE_GROUPS];
481 s8 tx_rate_limits_extreme[CONF_NUMBER_OF_RATE_GROUPS];
466 482
467 s8 tx_channel_limits_11b[CONF_NUMBER_OF_CHANNELS_2_4]; 483 s8 tx_channel_limits_11b[CONF_NUMBER_OF_CHANNELS_2_4];
468 s8 tx_channel_limits_ofdm[CONF_NUMBER_OF_CHANNELS_2_4]; 484 s8 tx_channel_limits_ofdm[CONF_NUMBER_OF_CHANNELS_2_4];
@@ -471,15 +487,19 @@ struct wl1271_radio_parms_cmd {
471 u8 tx_ibias[CONF_NUMBER_OF_RATE_GROUPS]; 487 u8 tx_ibias[CONF_NUMBER_OF_RATE_GROUPS];
472 u8 rx_fem_insertion_loss; 488 u8 rx_fem_insertion_loss;
473 489
474 u8 padding2; 490 u8 degraded_low_to_normal_threshold;
491 u8 degraded_normal_to_high_threshold;
492
493 u8 padding1; /* our own padding, not in ref driver */
475 494
476 /* 5GHz */ 495 /* 5GHz */
477 __le16 tx_ref_pd_voltage_5[CONF_NUMBER_OF_SUB_BANDS_5]; 496 __le16 tx_ref_pd_voltage_5[CONF_NUMBER_OF_SUB_BANDS_5];
478 s8 tx_ref_power_5[CONF_NUMBER_OF_SUB_BANDS_5]; 497 u8 tx_ref_power_5[CONF_NUMBER_OF_SUB_BANDS_5];
479 s8 tx_offset_db_5[CONF_NUMBER_OF_SUB_BANDS_5]; 498 s8 tx_offset_db_5[CONF_NUMBER_OF_SUB_BANDS_5];
480 499
481 s8 tx_rate_limits_normal_5[CONF_NUMBER_OF_RATE_GROUPS]; 500 s8 tx_rate_limits_normal_5[CONF_NUMBER_OF_RATE_GROUPS];
482 s8 tx_rate_limits_degraded_5[CONF_NUMBER_OF_RATE_GROUPS]; 501 s8 tx_rate_limits_degraded_5[CONF_NUMBER_OF_RATE_GROUPS];
502 s8 tx_rate_limits_extreme_5[CONF_NUMBER_OF_RATE_GROUPS];
483 503
484 s8 tx_channel_limits_ofdm_5[CONF_NUMBER_OF_CHANNELS_5]; 504 s8 tx_channel_limits_ofdm_5[CONF_NUMBER_OF_CHANNELS_5];
485 s8 tx_pdv_rate_offsets_5[CONF_NUMBER_OF_RATE_GROUPS]; 505 s8 tx_pdv_rate_offsets_5[CONF_NUMBER_OF_RATE_GROUPS];
@@ -488,7 +508,10 @@ struct wl1271_radio_parms_cmd {
488 s8 tx_ibias_5[CONF_NUMBER_OF_RATE_GROUPS]; 508 s8 tx_ibias_5[CONF_NUMBER_OF_RATE_GROUPS];
489 s8 rx_fem_insertion_loss_5[CONF_NUMBER_OF_SUB_BANDS_5]; 509 s8 rx_fem_insertion_loss_5[CONF_NUMBER_OF_SUB_BANDS_5];
490 510
491 u8 padding3[2]; 511 u8 degraded_low_to_normal_threshold_5;
512 u8 degraded_normal_to_high_threshold_5;
513
514 u8 padding2[2];
492} __attribute__ ((packed)); 515} __attribute__ ((packed));
493 516
494struct wl1271_cmd_cal_channel_tune { 517struct wl1271_cmd_cal_channel_tune {
diff --git a/drivers/net/wireless/wl12xx/wl1271_conf.h b/drivers/net/wireless/wl12xx/wl1271_conf.h
index 565373ede26..1993d63c214 100644
--- a/drivers/net/wireless/wl12xx/wl1271_conf.h
+++ b/drivers/net/wireless/wl12xx/wl1271_conf.h
@@ -258,7 +258,8 @@ struct conf_rx_settings {
258#define CONF_TX_MAX_RATE_CLASSES 8 258#define CONF_TX_MAX_RATE_CLASSES 8
259 259
260#define CONF_TX_RATE_MASK_UNSPECIFIED 0 260#define CONF_TX_RATE_MASK_UNSPECIFIED 0
261#define CONF_TX_RATE_MASK_ALL 0x1eff 261#define CONF_TX_RATE_MASK_BASIC (CONF_HW_BIT_RATE_1MBPS | \
262 CONF_HW_BIT_RATE_2MBPS)
262#define CONF_TX_RATE_RETRY_LIMIT 10 263#define CONF_TX_RATE_RETRY_LIMIT 10
263 264
264struct conf_tx_rate_class { 265struct conf_tx_rate_class {
@@ -722,31 +723,6 @@ struct conf_conn_settings {
722 u8 psm_entry_retries; 723 u8 psm_entry_retries;
723}; 724};
724 725
725#define CONF_SR_ERR_TBL_MAX_VALUES 14
726
727struct conf_mart_reflex_err_table {
728 /*
729 * Length of the error table values table.
730 *
731 * Range: 0 - CONF_SR_ERR_TBL_MAX_VALUES
732 */
733 u8 len;
734
735 /*
736 * Smart Reflex error table upper limit.
737 *
738 * Range: s8
739 */
740 s8 upper_limit;
741
742 /*
743 * Smart Reflex error table values.
744 *
745 * Range: s8
746 */
747 s8 values[CONF_SR_ERR_TBL_MAX_VALUES];
748};
749
750enum { 726enum {
751 CONF_REF_CLK_19_2_E, 727 CONF_REF_CLK_19_2_E,
752 CONF_REF_CLK_26_E, 728 CONF_REF_CLK_26_E,
@@ -759,6 +735,9 @@ enum single_dual_band_enum {
759 CONF_DUAL_BAND 735 CONF_DUAL_BAND
760}; 736};
761 737
738
739#define CONF_MAX_SMART_REFLEX_PARAMS 16
740
762struct conf_general_parms { 741struct conf_general_parms {
763 /* 742 /*
764 * RF Reference Clock type / speed 743 * RF Reference Clock type / speed
@@ -815,6 +794,20 @@ struct conf_general_parms {
815 * Range: Unknown 794 * Range: Unknown
816 */ 795 */
817 u8 settings; 796 u8 settings;
797
798 /* Smart reflex settings */
799 u8 sr_state;
800
801 s8 srf1[CONF_MAX_SMART_REFLEX_PARAMS];
802 s8 srf2[CONF_MAX_SMART_REFLEX_PARAMS];
803 s8 srf3[CONF_MAX_SMART_REFLEX_PARAMS];
804
805 s8 sr_debug_table[CONF_MAX_SMART_REFLEX_PARAMS];
806
807 u8 sr_sen_n_p;
808 u8 sr_sen_n_p_gain;
809 u8 sr_sen_nrn;
810 u8 sr_sen_prn;
818}; 811};
819 812
820#define CONF_RSSI_AND_PROCESS_COMPENSATION_SIZE 15 813#define CONF_RSSI_AND_PROCESS_COMPENSATION_SIZE 15
@@ -847,12 +840,13 @@ struct conf_radio_parms {
847 * 840 *
848 * Range: unknown 841 * Range: unknown
849 */ 842 */
850 s16 tx_ref_pd_voltage; 843 u16 tx_ref_pd_voltage;
851 s8 tx_ref_power; 844 u8 tx_ref_power;
852 s8 tx_offset_db; 845 s8 tx_offset_db;
853 846
854 s8 tx_rate_limits_normal[CONF_NUMBER_OF_RATE_GROUPS]; 847 s8 tx_rate_limits_normal[CONF_NUMBER_OF_RATE_GROUPS];
855 s8 tx_rate_limits_degraded[CONF_NUMBER_OF_RATE_GROUPS]; 848 s8 tx_rate_limits_degraded[CONF_NUMBER_OF_RATE_GROUPS];
849 s8 tx_rate_limits_extreme[CONF_NUMBER_OF_RATE_GROUPS];
856 850
857 s8 tx_channel_limits_11b[CONF_NUMBER_OF_CHANNELS_2_4]; 851 s8 tx_channel_limits_11b[CONF_NUMBER_OF_CHANNELS_2_4];
858 s8 tx_channel_limits_ofdm[CONF_NUMBER_OF_CHANNELS_2_4]; 852 s8 tx_channel_limits_ofdm[CONF_NUMBER_OF_CHANNELS_2_4];
@@ -861,17 +855,22 @@ struct conf_radio_parms {
861 u8 tx_ibias[CONF_NUMBER_OF_RATE_GROUPS]; 855 u8 tx_ibias[CONF_NUMBER_OF_RATE_GROUPS];
862 u8 rx_fem_insertion_loss; 856 u8 rx_fem_insertion_loss;
863 857
858 u8 degraded_low_to_normal_threshold;
859 u8 degraded_normal_to_high_threshold;
860
861
864 /* 862 /*
865 * Dynamic radio parameters for 5GHz 863 * Dynamic radio parameters for 5GHz
866 * 864 *
867 * Range: unknown 865 * Range: unknown
868 */ 866 */
869 s16 tx_ref_pd_voltage_5[CONF_NUMBER_OF_SUB_BANDS_5]; 867 u16 tx_ref_pd_voltage_5[CONF_NUMBER_OF_SUB_BANDS_5];
870 s8 tx_ref_power_5[CONF_NUMBER_OF_SUB_BANDS_5]; 868 u8 tx_ref_power_5[CONF_NUMBER_OF_SUB_BANDS_5];
871 s8 tx_offset_db_5[CONF_NUMBER_OF_SUB_BANDS_5]; 869 s8 tx_offset_db_5[CONF_NUMBER_OF_SUB_BANDS_5];
872 870
873 s8 tx_rate_limits_normal_5[CONF_NUMBER_OF_RATE_GROUPS]; 871 s8 tx_rate_limits_normal_5[CONF_NUMBER_OF_RATE_GROUPS];
874 s8 tx_rate_limits_degraded_5[CONF_NUMBER_OF_RATE_GROUPS]; 872 s8 tx_rate_limits_degraded_5[CONF_NUMBER_OF_RATE_GROUPS];
873 s8 tx_rate_limits_extreme_5[CONF_NUMBER_OF_RATE_GROUPS];
875 874
876 s8 tx_channel_limits_ofdm_5[CONF_NUMBER_OF_CHANNELS_5]; 875 s8 tx_channel_limits_ofdm_5[CONF_NUMBER_OF_CHANNELS_5];
877 s8 tx_pdv_rate_offsets_5[CONF_NUMBER_OF_RATE_GROUPS]; 876 s8 tx_pdv_rate_offsets_5[CONF_NUMBER_OF_RATE_GROUPS];
@@ -879,33 +878,46 @@ struct conf_radio_parms {
879 /* FIXME: this is inconsistent with the types for 2.4GHz */ 878 /* FIXME: this is inconsistent with the types for 2.4GHz */
880 s8 tx_ibias_5[CONF_NUMBER_OF_RATE_GROUPS]; 879 s8 tx_ibias_5[CONF_NUMBER_OF_RATE_GROUPS];
881 s8 rx_fem_insertion_loss_5[CONF_NUMBER_OF_SUB_BANDS_5]; 880 s8 rx_fem_insertion_loss_5[CONF_NUMBER_OF_SUB_BANDS_5];
882};
883 881
884#define CONF_SR_ERR_TBL_COUNT 3 882 u8 degraded_low_to_normal_threshold_5;
883 u8 degraded_normal_to_high_threshold_5;
884};
885 885
886struct conf_init_settings { 886struct conf_init_settings {
887 /* 887 /*
888 * Configure Smart Reflex error table values. 888 * Configure general parameters.
889 */ 889 */
890 struct conf_mart_reflex_err_table sr_err_tbl[CONF_SR_ERR_TBL_COUNT]; 890 struct conf_general_parms genparam;
891 891
892 /* 892 /*
893 * Smart Reflex enable flag. 893 * Configure radio parameters.
894 *
895 * Range: 1 - Smart Reflex enabled, 0 - Smart Reflex disabled
896 */ 894 */
897 u8 sr_enable; 895 struct conf_radio_parms radioparam;
898 896
897};
898
899struct conf_itrim_settings {
900 /* enable dco itrim */
901 u8 enable;
902
903 /* moderation timeout in microsecs from the last TX */
904 u32 timeout;
905};
906
907struct conf_pm_config_settings {
899 /* 908 /*
900 * Configure general parameters. 909 * Host clock settling time
910 *
911 * Range: 0 - 30000 us
901 */ 912 */
902 struct conf_general_parms genparam; 913 u32 host_clk_settling_time;
903 914
904 /* 915 /*
905 * Configure radio parameters. 916 * Host fast wakeup support
917 *
918 * Range: true, false
906 */ 919 */
907 struct conf_radio_parms radioparam; 920 bool host_fast_wakeup_support;
908
909}; 921};
910 922
911struct conf_drv_settings { 923struct conf_drv_settings {
@@ -914,6 +926,8 @@ struct conf_drv_settings {
914 struct conf_tx_settings tx; 926 struct conf_tx_settings tx;
915 struct conf_conn_settings conn; 927 struct conf_conn_settings conn;
916 struct conf_init_settings init; 928 struct conf_init_settings init;
929 struct conf_itrim_settings itrim;
930 struct conf_pm_config_settings pm_config;
917}; 931};
918 932
919#endif 933#endif
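wl1271_conf.h gains two new configuration blocks, conf_itrim_settings and conf_pm_config_settings, and hooks them into conf_drv_settings; the defaults the patch installs for them appear further down in wl1271_main.c (itrim disabled with a 50000 us timeout, 5000 us host clock settling time, fast wakeup off). A trimmed-down sketch of declaring and populating such a nested configuration block:

#include <inttypes.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Trimmed-down mirror of the new configuration blocks; only the fields
 * added by this patch are reproduced, and the struct names are shortened. */
struct itrim_settings {
	uint8_t  enable;	/* enable DCO itrim */
	uint32_t timeout;	/* moderation timeout, usecs from last TX */
};

struct pm_config_settings {
	uint32_t host_clk_settling_time;	/* 0 - 30000 us */
	bool     host_fast_wakeup_support;
};

struct drv_settings {
	struct itrim_settings     itrim;
	struct pm_config_settings pm_config;
};

/* Values match the defaults added to default_conf in wl1271_main.c. */
static const struct drv_settings defaults = {
	.itrim = {
		.enable  = 0,
		.timeout = 50000,
	},
	.pm_config = {
		.host_clk_settling_time   = 5000,
		.host_fast_wakeup_support = false,
	},
};

int main(void)
{
	printf("itrim: enable=%u timeout=%" PRIu32 " us\n",
	       (unsigned)defaults.itrim.enable, defaults.itrim.timeout);
	printf("pm: settling=%" PRIu32 " us fast_wakeup=%d\n",
	       defaults.pm_config.host_clk_settling_time,
	       defaults.pm_config.host_fast_wakeup_support);
	return 0;
}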
diff --git a/drivers/net/wireless/wl12xx/wl1271_debugfs.c b/drivers/net/wireless/wl12xx/wl1271_debugfs.c
index c1805e5f896..8d7588ca68f 100644
--- a/drivers/net/wireless/wl12xx/wl1271_debugfs.c
+++ b/drivers/net/wireless/wl12xx/wl1271_debugfs.c
@@ -237,6 +237,64 @@ static const struct file_operations tx_queue_len_ops = {
237 .open = wl1271_open_file_generic, 237 .open = wl1271_open_file_generic,
238}; 238};
239 239
240static ssize_t gpio_power_read(struct file *file, char __user *user_buf,
241 size_t count, loff_t *ppos)
242{
243 struct wl1271 *wl = file->private_data;
244 bool state = test_bit(WL1271_FLAG_GPIO_POWER, &wl->flags);
245
246 int res;
247 char buf[10];
248
249 res = scnprintf(buf, sizeof(buf), "%d\n", state);
250
251 return simple_read_from_buffer(user_buf, count, ppos, buf, res);
252}
253
254static ssize_t gpio_power_write(struct file *file,
255 const char __user *user_buf,
256 size_t count, loff_t *ppos)
257{
258 struct wl1271 *wl = file->private_data;
259 char buf[10];
260 size_t len;
261 unsigned long value;
262 int ret;
263
264 mutex_lock(&wl->mutex);
265
266 len = min(count, sizeof(buf) - 1);
267 if (copy_from_user(buf, user_buf, len)) {
268 ret = -EFAULT;
269 goto out;
270 }
271 buf[len] = '\0';
272
273 ret = strict_strtoul(buf, 0, &value);
274 if (ret < 0) {
275 wl1271_warning("illegal value in gpio_power");
276 goto out;
277 }
278
279 if (value) {
280 wl->set_power(true);
281 set_bit(WL1271_FLAG_GPIO_POWER, &wl->flags);
282 } else {
283 wl->set_power(false);
284 clear_bit(WL1271_FLAG_GPIO_POWER, &wl->flags);
285 }
286
287out:
288 mutex_unlock(&wl->mutex);
289 return count;
290}
291
292static const struct file_operations gpio_power_ops = {
293 .read = gpio_power_read,
294 .write = gpio_power_write,
295 .open = wl1271_open_file_generic
296};
297
240static void wl1271_debugfs_delete_files(struct wl1271 *wl) 298static void wl1271_debugfs_delete_files(struct wl1271 *wl)
241{ 299{
242 DEBUGFS_FWSTATS_DEL(tx, internal_desc_overflow); 300 DEBUGFS_FWSTATS_DEL(tx, internal_desc_overflow);
@@ -333,6 +391,8 @@ static void wl1271_debugfs_delete_files(struct wl1271 *wl)
333 DEBUGFS_DEL(tx_queue_len); 391 DEBUGFS_DEL(tx_queue_len);
334 DEBUGFS_DEL(retry_count); 392 DEBUGFS_DEL(retry_count);
335 DEBUGFS_DEL(excessive_retries); 393 DEBUGFS_DEL(excessive_retries);
394
395 DEBUGFS_DEL(gpio_power);
336} 396}
337 397
338static int wl1271_debugfs_add_files(struct wl1271 *wl) 398static int wl1271_debugfs_add_files(struct wl1271 *wl)
@@ -434,6 +494,8 @@ static int wl1271_debugfs_add_files(struct wl1271 *wl)
434 DEBUGFS_ADD(retry_count, wl->debugfs.rootdir); 494 DEBUGFS_ADD(retry_count, wl->debugfs.rootdir);
435 DEBUGFS_ADD(excessive_retries, wl->debugfs.rootdir); 495 DEBUGFS_ADD(excessive_retries, wl->debugfs.rootdir);
436 496
497 DEBUGFS_ADD(gpio_power, wl->debugfs.rootdir);
498
437out: 499out:
438 if (ret < 0) 500 if (ret < 0)
439 wl1271_debugfs_delete_files(wl); 501 wl1271_debugfs_delete_files(wl);
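The new gpio_power debugfs file reports the WL1271_FLAG_GPIO_POWER bit on read and, on write, parses an integer from the user buffer (strict_strtoul in the kernel) and switches the chip supply through wl->set_power() while keeping the flag in sync. The userspace sketch below mirrors only the parse-and-toggle step; buffer and error handling are simplified and the helper names are illustrative.

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static int gpio_power_state;	/* stands in for the GPIO_POWER flag bit */

static void set_power(int on)	/* stands in for wl->set_power() */
{
	gpio_power_state = on;
	printf("chip power %s\n", on ? "on" : "off");
}

/* Parse a user-supplied buffer roughly the way the write handler does:
 * copy it into a small bounded buffer, NUL-terminate, convert with strtoul
 * (strict_strtoul in the kernel) and treat any non-zero value as "on". */
static int gpio_power_write(const char *user_buf, size_t count)
{
	char buf[10];
	size_t len = count < sizeof(buf) - 1 ? count : sizeof(buf) - 1;
	unsigned long value;
	char *end;

	memcpy(buf, user_buf, len);
	buf[len] = '\0';

	errno = 0;
	value = strtoul(buf, &end, 0);
	if (errno || end == buf)
		return -EINVAL;		/* "illegal value in gpio_power" */

	set_power(value != 0);
	return (int)count;
}

int main(void)
{
	gpio_power_write("1\n", 2);
	gpio_power_write("0\n", 2);
	return 0;
}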
diff --git a/drivers/net/wireless/wl12xx/wl1271_event.c b/drivers/net/wireless/wl12xx/wl1271_event.c
index d13fdd99c85..0a145afc990 100644
--- a/drivers/net/wireless/wl12xx/wl1271_event.c
+++ b/drivers/net/wireless/wl12xx/wl1271_event.c
@@ -35,7 +35,7 @@ static int wl1271_event_scan_complete(struct wl1271 *wl,
35 wl1271_debug(DEBUG_EVENT, "status: 0x%x", 35 wl1271_debug(DEBUG_EVENT, "status: 0x%x",
36 mbox->scheduled_scan_status); 36 mbox->scheduled_scan_status);
37 37
38 if (wl->scanning) { 38 if (test_bit(WL1271_FLAG_SCANNING, &wl->flags)) {
39 if (wl->scan.state == WL1271_SCAN_BAND_DUAL) { 39 if (wl->scan.state == WL1271_SCAN_BAND_DUAL) {
40 wl1271_cmd_template_set(wl, CMD_TEMPL_CFG_PROBE_REQ_2_4, 40 wl1271_cmd_template_set(wl, CMD_TEMPL_CFG_PROBE_REQ_2_4,
41 NULL, size); 41 NULL, size);
@@ -43,7 +43,7 @@ static int wl1271_event_scan_complete(struct wl1271 *wl,
43 * to the wl1271_cmd_scan function that we are not 43 * to the wl1271_cmd_scan function that we are not
44 * scanning as it checks that. 44 * scanning as it checks that.
45 */ 45 */
46 wl->scanning = false; 46 clear_bit(WL1271_FLAG_SCANNING, &wl->flags);
47 wl1271_cmd_scan(wl, wl->scan.ssid, wl->scan.ssid_len, 47 wl1271_cmd_scan(wl, wl->scan.ssid, wl->scan.ssid_len,
48 wl->scan.active, 48 wl->scan.active,
49 wl->scan.high_prio, 49 wl->scan.high_prio,
@@ -62,7 +62,7 @@ static int wl1271_event_scan_complete(struct wl1271 *wl,
62 mutex_unlock(&wl->mutex); 62 mutex_unlock(&wl->mutex);
63 ieee80211_scan_completed(wl->hw, false); 63 ieee80211_scan_completed(wl->hw, false);
64 mutex_lock(&wl->mutex); 64 mutex_lock(&wl->mutex);
65 wl->scanning = false; 65 clear_bit(WL1271_FLAG_SCANNING, &wl->flags);
66 } 66 }
67 } 67 }
68 return 0; 68 return 0;
@@ -78,7 +78,7 @@ static int wl1271_event_ps_report(struct wl1271 *wl,
78 78
79 switch (mbox->ps_status) { 79 switch (mbox->ps_status) {
80 case EVENT_ENTER_POWER_SAVE_FAIL: 80 case EVENT_ENTER_POWER_SAVE_FAIL:
81 if (!wl->psm) { 81 if (!test_bit(WL1271_FLAG_PSM, &wl->flags)) {
82 wl->psm_entry_retry = 0; 82 wl->psm_entry_retry = 0;
83 break; 83 break;
84 } 84 }
@@ -89,7 +89,6 @@ static int wl1271_event_ps_report(struct wl1271 *wl,
89 } else { 89 } else {
90 wl1271_error("PSM entry failed, giving up.\n"); 90 wl1271_error("PSM entry failed, giving up.\n");
91 wl->psm_entry_retry = 0; 91 wl->psm_entry_retry = 0;
92 *beacon_loss = true;
93 } 92 }
94 break; 93 break;
95 case EVENT_ENTER_POWER_SAVE_SUCCESS: 94 case EVENT_ENTER_POWER_SAVE_SUCCESS:
@@ -136,7 +135,8 @@ static int wl1271_event_process(struct wl1271 *wl, struct event_mailbox *mbox)
136 * filtering) is enabled. Without PSM, the stack will receive all 135 * filtering) is enabled. Without PSM, the stack will receive all
137 * beacons and can detect beacon loss by itself. 136 * beacons and can detect beacon loss by itself.
138 */ 137 */
139 if (vector & BSS_LOSE_EVENT_ID && wl->psm) { 138 if (vector & BSS_LOSE_EVENT_ID &&
139 test_bit(WL1271_FLAG_PSM, &wl->flags)) {
140 wl1271_debug(DEBUG_EVENT, "BSS_LOSE_EVENT"); 140 wl1271_debug(DEBUG_EVENT, "BSS_LOSE_EVENT");
141 141
142 /* indicate to the stack, that beacons have been lost */ 142 /* indicate to the stack, that beacons have been lost */
@@ -150,7 +150,7 @@ static int wl1271_event_process(struct wl1271 *wl, struct event_mailbox *mbox)
150 return ret; 150 return ret;
151 } 151 }
152 152
153 if (beacon_loss) { 153 if (wl->vif && beacon_loss) {
154 /* Obviously, it's dangerous to release the mutex while 154 /* Obviously, it's dangerous to release the mutex while
155 we are holding many of the variables in the wl struct. 155 we are holding many of the variables in the wl struct.
156 That's why it's done last in the function, and care must 156 That's why it's done last in the function, and care must
@@ -184,7 +184,7 @@ void wl1271_event_mbox_config(struct wl1271 *wl)
184 wl->mbox_ptr[0], wl->mbox_ptr[1]); 184 wl->mbox_ptr[0], wl->mbox_ptr[1]);
185} 185}
186 186
187int wl1271_event_handle(struct wl1271 *wl, u8 mbox_num, bool do_ack) 187int wl1271_event_handle(struct wl1271 *wl, u8 mbox_num)
188{ 188{
189 struct event_mailbox mbox; 189 struct event_mailbox mbox;
190 int ret; 190 int ret;
@@ -204,9 +204,7 @@ int wl1271_event_handle(struct wl1271 *wl, u8 mbox_num, bool do_ack)
204 return ret; 204 return ret;
205 205
206 /* then we let the firmware know it can go on...*/ 206 /* then we let the firmware know it can go on...*/
207 if (do_ack) 207 wl1271_spi_write32(wl, ACX_REG_INTERRUPT_TRIG, INTR_TRIG_EVENT_ACK);
208 wl1271_spi_write32(wl, ACX_REG_INTERRUPT_TRIG,
209 INTR_TRIG_EVENT_ACK);
210 208
211 return 0; 209 return 0;
212} 210}
diff --git a/drivers/net/wireless/wl12xx/wl1271_event.h b/drivers/net/wireless/wl12xx/wl1271_event.h
index 4e3f55ebb1a..278f9206aa5 100644
--- a/drivers/net/wireless/wl12xx/wl1271_event.h
+++ b/drivers/net/wireless/wl12xx/wl1271_event.h
@@ -112,6 +112,6 @@ struct event_mailbox {
112 112
113int wl1271_event_unmask(struct wl1271 *wl); 113int wl1271_event_unmask(struct wl1271 *wl);
114void wl1271_event_mbox_config(struct wl1271 *wl); 114void wl1271_event_mbox_config(struct wl1271 *wl);
115int wl1271_event_handle(struct wl1271 *wl, u8 mbox, bool do_ack); 115int wl1271_event_handle(struct wl1271 *wl, u8 mbox);
116 116
117#endif 117#endif
diff --git a/drivers/net/wireless/wl12xx/wl1271_init.c b/drivers/net/wireless/wl12xx/wl1271_init.c
index 11249b436cf..c9848eecb76 100644
--- a/drivers/net/wireless/wl12xx/wl1271_init.c
+++ b/drivers/net/wireless/wl12xx/wl1271_init.c
@@ -229,6 +229,10 @@ int wl1271_hw_init(struct wl1271 *wl)
229 if (ret < 0) 229 if (ret < 0)
230 goto out_free_memmap; 230 goto out_free_memmap;
231 231
232 ret = wl1271_acx_dco_itrim_params(wl);
233 if (ret < 0)
234 goto out_free_memmap;
235
232 /* Initialize connection monitoring thresholds */ 236 /* Initialize connection monitoring thresholds */
233 ret = wl1271_acx_conn_monit_params(wl); 237 ret = wl1271_acx_conn_monit_params(wl);
234 if (ret < 0) 238 if (ret < 0)
@@ -280,12 +284,12 @@ int wl1271_hw_init(struct wl1271 *wl)
280 goto out_free_memmap; 284 goto out_free_memmap;
281 285
282 /* Configure TX rate classes */ 286 /* Configure TX rate classes */
283 ret = wl1271_acx_rate_policies(wl, CONF_TX_RATE_MASK_ALL); 287 ret = wl1271_acx_rate_policies(wl);
284 if (ret < 0) 288 if (ret < 0)
285 goto out_free_memmap; 289 goto out_free_memmap;
286 290
287 /* Enable data path */ 291 /* Enable data path */
288 ret = wl1271_cmd_data_path(wl, wl->channel, 1); 292 ret = wl1271_cmd_data_path(wl, 1);
289 if (ret < 0) 293 if (ret < 0)
290 goto out_free_memmap; 294 goto out_free_memmap;
291 295
@@ -299,8 +303,8 @@ int wl1271_hw_init(struct wl1271 *wl)
299 if (ret < 0) 303 if (ret < 0)
300 goto out_free_memmap; 304 goto out_free_memmap;
301 305
302 /* Configure smart reflex */ 306 /* configure PM */
303 ret = wl1271_acx_smart_reflex(wl); 307 ret = wl1271_acx_pm_config(wl);
304 if (ret < 0) 308 if (ret < 0)
305 goto out_free_memmap; 309 goto out_free_memmap;
306 310
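wl1271_hw_init() now also programs the DCO itrim parameters, calls wl1271_acx_rate_policies() without a rate mask, drops the channel argument from wl1271_cmd_data_path() and replaces the smart-reflex ACX step with the new PM config one; every step keeps the "bail out to the cleanup label on the first failure" shape. The sketch below expresses such a step list through a table of function pointers; this is only a restructuring idea for illustration, not how the driver is written.

#include <stdio.h>

/* Stand-ins for a few of the steps chained in wl1271_hw_init(). */
static int init_mem_config(void)    { return 0; }
static int init_itrim_params(void)  { return 0; }
static int init_rate_policies(void) { return 0; }
static int init_data_path(void)     { return 0; }

/* Table-driven variant of the "call, check, bail out on first failure"
 * chain: stop at the first failing step and report which one it was.
 * Shown only as a restructuring idea; the driver keeps the explicit
 * call chain with a goto to its cleanup label. */
static int hw_init(void)
{
	static const struct {
		const char *name;
		int (*fn)(void);
	} steps[] = {
		{ "mem config",    init_mem_config },
		{ "dco itrim",     init_itrim_params },
		{ "rate policies", init_rate_policies },
		{ "data path",     init_data_path },
	};

	for (size_t i = 0; i < sizeof(steps) / sizeof(steps[0]); i++) {
		int ret = steps[i].fn();

		if (ret < 0) {
			fprintf(stderr, "init step '%s' failed: %d\n",
				steps[i].name, ret);
			return ret;
		}
	}
	return 0;
}

int main(void)
{
	return hw_init();
}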
diff --git a/drivers/net/wireless/wl12xx/wl1271_main.c b/drivers/net/wireless/wl12xx/wl1271_main.c
index b62c00ff42f..e4867b895c4 100644
--- a/drivers/net/wireless/wl12xx/wl1271_main.c
+++ b/drivers/net/wireless/wl12xx/wl1271_main.c
@@ -47,6 +47,8 @@
47#include "wl1271_cmd.h" 47#include "wl1271_cmd.h"
48#include "wl1271_boot.h" 48#include "wl1271_boot.h"
49 49
50#define WL1271_BOOT_RETRIES 3
51
50static struct conf_drv_settings default_conf = { 52static struct conf_drv_settings default_conf = {
51 .sg = { 53 .sg = {
52 .per_threshold = 7500, 54 .per_threshold = 7500,
@@ -67,16 +69,17 @@ static struct conf_drv_settings default_conf = {
67 .ps_poll_timeout = 15, 69 .ps_poll_timeout = 15,
68 .upsd_timeout = 15, 70 .upsd_timeout = 15,
69 .rts_threshold = 2347, 71 .rts_threshold = 2347,
70 .rx_cca_threshold = 0xFFEF, 72 .rx_cca_threshold = 0,
71 .irq_blk_threshold = 0, 73 .irq_blk_threshold = 0xFFFF,
72 .irq_pkt_threshold = USHORT_MAX, 74 .irq_pkt_threshold = 0,
73 .irq_timeout = 5, 75 .irq_timeout = 600,
74 .queue_type = CONF_RX_QUEUE_TYPE_LOW_PRIORITY, 76 .queue_type = CONF_RX_QUEUE_TYPE_LOW_PRIORITY,
75 }, 77 },
76 .tx = { 78 .tx = {
77 .tx_energy_detection = 0, 79 .tx_energy_detection = 0,
78 .rc_conf = { 80 .rc_conf = {
79 .enabled_rates = CONF_TX_RATE_MASK_UNSPECIFIED, 81 .enabled_rates = CONF_HW_BIT_RATE_1MBPS |
82 CONF_HW_BIT_RATE_2MBPS,
80 .short_retry_limit = 10, 83 .short_retry_limit = 10,
81 .long_retry_limit = 10, 84 .long_retry_limit = 10,
82 .aflags = 0 85 .aflags = 0
@@ -172,8 +175,8 @@ static struct conf_drv_settings default_conf = {
172 } 175 }
173 }, 176 },
174 .frag_threshold = IEEE80211_MAX_FRAG_THRESHOLD, 177 .frag_threshold = IEEE80211_MAX_FRAG_THRESHOLD,
175 .tx_compl_timeout = 5, 178 .tx_compl_timeout = 700,
176 .tx_compl_threshold = 5 179 .tx_compl_threshold = 4
177 }, 180 },
178 .conn = { 181 .conn = {
179 .wake_up_event = CONF_WAKE_UP_EVENT_DTIM, 182 .wake_up_event = CONF_WAKE_UP_EVENT_DTIM,
@@ -186,12 +189,12 @@ static struct conf_drv_settings default_conf = {
186 .rule = CONF_BCN_RULE_PASS_ON_APPEARANCE, 189 .rule = CONF_BCN_RULE_PASS_ON_APPEARANCE,
187 } 190 }
188 }, 191 },
189 .synch_fail_thold = 5, 192 .synch_fail_thold = 10,
190 .bss_lose_timeout = 100, 193 .bss_lose_timeout = 100,
191 .beacon_rx_timeout = 10000, 194 .beacon_rx_timeout = 10000,
192 .broadcast_timeout = 20000, 195 .broadcast_timeout = 20000,
193 .rx_broadcast_in_ps = 1, 196 .rx_broadcast_in_ps = 1,
194 .ps_poll_threshold = 4, 197 .ps_poll_threshold = 20,
195 .sig_trigger_count = 2, 198 .sig_trigger_count = 2,
196 .sig_trigger = { 199 .sig_trigger = {
197 [0] = { 200 [0] = {
@@ -226,46 +229,35 @@ static struct conf_drv_settings default_conf = {
226 .psm_entry_retries = 3 229 .psm_entry_retries = 3
227 }, 230 },
228 .init = { 231 .init = {
229 .sr_err_tbl = {
230 [0] = {
231 .len = 7,
232 .upper_limit = 0x03,
233 .values = {
234 0x18, 0x10, 0x05, 0xfb, 0xf0, 0xe8,
235 0x00 }
236 },
237 [1] = {
238 .len = 7,
239 .upper_limit = 0x03,
240 .values = {
241 0x18, 0x10, 0x05, 0xf6, 0xf0, 0xe8,
242 0x00 }
243 },
244 [2] = {
245 .len = 7,
246 .upper_limit = 0x03,
247 .values = {
248 0x18, 0x10, 0x05, 0xfb, 0xf0, 0xe8,
249 0x00 }
250 }
251 },
252 .sr_enable = 1,
253 .genparam = { 232 .genparam = {
254 .ref_clk = CONF_REF_CLK_38_4_E, 233 .ref_clk = CONF_REF_CLK_38_4_E,
255 .settling_time = 5, 234 .settling_time = 5,
256 .clk_valid_on_wakeup = 0, 235 .clk_valid_on_wakeup = 0,
257 .dc2dcmode = 0, 236 .dc2dcmode = 0,
258 .single_dual_band = CONF_SINGLE_BAND, 237 .single_dual_band = CONF_SINGLE_BAND,
259 .tx_bip_fem_autodetect = 0, 238 .tx_bip_fem_autodetect = 1,
260 .tx_bip_fem_manufacturer = 1, 239 .tx_bip_fem_manufacturer = 1,
261 .settings = 1, 240 .settings = 1,
241 .sr_state = 1,
242 .srf1 = { 0x07, 0x03, 0x18, 0x10, 0x05, 0xfb, 0xf0,
243 0xe8, 0, 0, 0, 0, 0, 0, 0, 0 },
244 .srf2 = { 0x07, 0x03, 0x18, 0x10, 0x05, 0xfb, 0xf0,
245 0xe8, 0, 0, 0, 0, 0, 0, 0, 0 },
246 .srf3 = { 0x07, 0x03, 0x18, 0x10, 0x05, 0xfb, 0xf0,
247 0xe8, 0, 0, 0, 0, 0, 0, 0, 0 },
248 .sr_debug_table = { 0, 0, 0, 0, 0, 0, 0, 0,
249 0, 0, 0, 0, 0, 0, 0, 0 },
250 .sr_sen_n_p = 0,
251 .sr_sen_n_p_gain = 0,
252 .sr_sen_nrn = 0,
253 .sr_sen_prn = 0,
262 }, 254 },
263 .radioparam = { 255 .radioparam = {
264 .rx_trace_loss = 10, 256 .rx_trace_loss = 0x24,
265 .tx_trace_loss = 10, 257 .tx_trace_loss = 0x0,
266 .rx_rssi_and_proc_compens = { 258 .rx_rssi_and_proc_compens = {
267 0xec, 0xf6, 0x00, 0x0c, 0x18, 0xf8, 259 0xec, 0xf6, 0x00, 0x0c, 0x18, 0xf8,
268 0xfc, 0x00, 0x08, 0x10, 0xf0, 0xf8, 260 0xfc, 0x00, 0x80, 0x10, 0xf0, 0xf8,
269 0x00, 0x0a, 0x14 }, 261 0x00, 0x0a, 0x14 },
270 .rx_trace_loss_5 = { 0, 0, 0, 0, 0, 0, 0 }, 262 .rx_trace_loss_5 = { 0, 0, 0, 0, 0, 0, 0 },
271 .tx_trace_loss_5 = { 0, 0, 0, 0, 0, 0, 0 }, 263 .tx_trace_loss_5 = { 0, 0, 0, 0, 0, 0, 0 },
@@ -273,13 +265,15 @@ static struct conf_drv_settings default_conf = {
273 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 265 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
274 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 266 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
275 0x00, 0x00, 0x00 }, 267 0x00, 0x00, 0x00 },
276 .tx_ref_pd_voltage = 0x24e, 268 .tx_ref_pd_voltage = 0x1a9,
277 .tx_ref_power = 0x78, 269 .tx_ref_power = 0x80,
278 .tx_offset_db = 0x0, 270 .tx_offset_db = 0x0,
279 .tx_rate_limits_normal = { 271 .tx_rate_limits_normal = {
280 0x1e, 0x1f, 0x22, 0x24, 0x28, 0x29 }, 272 0x1d, 0x1f, 0x24, 0x28, 0x28, 0x29 },
281 .tx_rate_limits_degraded = { 273 .tx_rate_limits_degraded = {
282 0x1b, 0x1c, 0x1e, 0x20, 0x24, 0x25 }, 274 0x19, 0x1f, 0x22, 0x23, 0x27, 0x28 },
275 .tx_rate_limits_extreme = {
276 0x19, 0x1c, 0x1e, 0x20, 0x24, 0x25 },
283 .tx_channel_limits_11b = { 277 .tx_channel_limits_11b = {
284 0x22, 0x50, 0x50, 0x50, 0x50, 0x50, 278 0x22, 0x50, 0x50, 0x50, 0x50, 0x50,
285 0x50, 0x50, 0x50, 0x50, 0x22, 0x50, 279 0x50, 0x50, 0x50, 0x50, 0x22, 0x50,
@@ -289,10 +283,12 @@ static struct conf_drv_settings default_conf = {
289 0x50, 0x50, 0x50, 0x50, 0x20, 0x50, 283 0x50, 0x50, 0x50, 0x50, 0x20, 0x50,
290 0x20, 0x50 }, 284 0x20, 0x50 },
291 .tx_pdv_rate_offsets = { 285 .tx_pdv_rate_offsets = {
292 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }, 286 0x07, 0x08, 0x04, 0x02, 0x02, 0x00 },
293 .tx_ibias = { 287 .tx_ibias = {
294 0x1a, 0x1a, 0x1a, 0x1a, 0x1a, 0x27 }, 288 0x11, 0x11, 0x15, 0x11, 0x15, 0x0f },
295 .rx_fem_insertion_loss = 0x14, 289 .rx_fem_insertion_loss = 0x0e,
290 .degraded_low_to_normal_threshold = 0x1e,
291 .degraded_normal_to_high_threshold = 0x2d,
296 .tx_ref_pd_voltage_5 = { 292 .tx_ref_pd_voltage_5 = {
297 0x0190, 0x01a4, 0x01c3, 0x01d8, 293 0x0190, 0x01a4, 0x01c3, 0x01d8,
298 0x020a, 0x021c }, 294 0x020a, 0x021c },
@@ -304,6 +300,8 @@ static struct conf_drv_settings default_conf = {
304 0x1b, 0x1e, 0x21, 0x23, 0x27, 0x00 }, 300 0x1b, 0x1e, 0x21, 0x23, 0x27, 0x00 },
305 .tx_rate_limits_degraded_5 = { 301 .tx_rate_limits_degraded_5 = {
306 0x1b, 0x1e, 0x21, 0x23, 0x27, 0x00 }, 302 0x1b, 0x1e, 0x21, 0x23, 0x27, 0x00 },
303 .tx_rate_limits_extreme_5 = {
304 0x1b, 0x1e, 0x21, 0x23, 0x27, 0x00 },
307 .tx_channel_limits_ofdm_5 = { 305 .tx_channel_limits_ofdm_5 = {
308 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 306 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50,
309 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 307 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50,
@@ -315,8 +313,18 @@ static struct conf_drv_settings default_conf = {
315 .tx_ibias_5 = { 313 .tx_ibias_5 = {
316 0x10, 0x10, 0x10, 0x10, 0x10, 0x10 }, 314 0x10, 0x10, 0x10, 0x10, 0x10, 0x10 },
317 .rx_fem_insertion_loss_5 = { 315 .rx_fem_insertion_loss_5 = {
318 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10 } 316 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10 },
317 .degraded_low_to_normal_threshold_5 = 0x00,
318 .degraded_normal_to_high_threshold_5 = 0x00
319 } 319 }
320 },
321 .itrim = {
322 .enable = false,
323 .timeout = 50000,
324 },
325 .pm_config = {
326 .host_clk_settling_time = 5000,
327 .host_fast_wakeup_support = false
320 } 328 }
321}; 329};
322 330
@@ -359,7 +367,7 @@ static int wl1271_plt_init(struct wl1271 *wl)
359 if (ret < 0) 367 if (ret < 0)
360 return ret; 368 return ret;
361 369
362 ret = wl1271_cmd_data_path(wl, wl->channel, 1); 370 ret = wl1271_cmd_data_path(wl, 1);
363 if (ret < 0) 371 if (ret < 0)
364 return ret; 372 return ret;
365 373
@@ -374,11 +382,13 @@ static void wl1271_disable_interrupts(struct wl1271 *wl)
374static void wl1271_power_off(struct wl1271 *wl) 382static void wl1271_power_off(struct wl1271 *wl)
375{ 383{
376 wl->set_power(false); 384 wl->set_power(false);
385 clear_bit(WL1271_FLAG_GPIO_POWER, &wl->flags);
377} 386}
378 387
379static void wl1271_power_on(struct wl1271 *wl) 388static void wl1271_power_on(struct wl1271 *wl)
380{ 389{
381 wl->set_power(true); 390 wl->set_power(true);
391 set_bit(WL1271_FLAG_GPIO_POWER, &wl->flags);
382} 392}
383 393
384static void wl1271_fw_status(struct wl1271 *wl, 394static void wl1271_fw_status(struct wl1271 *wl,
@@ -447,14 +457,13 @@ static void wl1271_irq_work(struct work_struct *work)
447 intr &= WL1271_INTR_MASK; 457 intr &= WL1271_INTR_MASK;
448 458
449 if (intr & WL1271_ACX_INTR_EVENT_A) { 459 if (intr & WL1271_ACX_INTR_EVENT_A) {
450 bool do_ack = (intr & WL1271_ACX_INTR_EVENT_B) ? false : true;
451 wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_EVENT_A"); 460 wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_EVENT_A");
452 wl1271_event_handle(wl, 0, do_ack); 461 wl1271_event_handle(wl, 0);
453 } 462 }
454 463
455 if (intr & WL1271_ACX_INTR_EVENT_B) { 464 if (intr & WL1271_ACX_INTR_EVENT_B) {
456 wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_EVENT_B"); 465 wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_EVENT_B");
457 wl1271_event_handle(wl, 1, true); 466 wl1271_event_handle(wl, 1);
458 } 467 }
459 468
460 if (intr & WL1271_ACX_INTR_INIT_COMPLETE) 469 if (intr & WL1271_ACX_INTR_INIT_COMPLETE)
@@ -614,6 +623,7 @@ static int wl1271_chip_wakeup(struct wl1271 *wl)
614 struct wl1271_partition_set partition; 623 struct wl1271_partition_set partition;
615 int ret = 0; 624 int ret = 0;
616 625
626 msleep(WL1271_PRE_POWER_ON_SLEEP);
617 wl1271_power_on(wl); 627 wl1271_power_on(wl);
618 msleep(WL1271_POWER_ON_SLEEP); 628 msleep(WL1271_POWER_ON_SLEEP);
619 wl1271_spi_reset(wl); 629 wl1271_spi_reset(wl);
@@ -643,7 +653,7 @@ static int wl1271_chip_wakeup(struct wl1271 *wl)
643 653
644 ret = wl1271_setup(wl); 654 ret = wl1271_setup(wl);
645 if (ret < 0) 655 if (ret < 0)
646 goto out_power_off; 656 goto out;
647 break; 657 break;
648 case CHIP_ID_1271_PG20: 658 case CHIP_ID_1271_PG20:
649 wl1271_debug(DEBUG_BOOT, "chip id 0x%x (1271 PG20)", 659 wl1271_debug(DEBUG_BOOT, "chip id 0x%x (1271 PG20)",
@@ -651,38 +661,34 @@ static int wl1271_chip_wakeup(struct wl1271 *wl)
651 661
652 ret = wl1271_setup(wl); 662 ret = wl1271_setup(wl);
653 if (ret < 0) 663 if (ret < 0)
654 goto out_power_off; 664 goto out;
655 break; 665 break;
656 default: 666 default:
657 wl1271_error("unsupported chip id: 0x%x", wl->chip.id); 667 wl1271_warning("unsupported chip id: 0x%x", wl->chip.id);
658 ret = -ENODEV; 668 ret = -ENODEV;
659 goto out_power_off; 669 goto out;
660 } 670 }
661 671
662 if (wl->fw == NULL) { 672 if (wl->fw == NULL) {
663 ret = wl1271_fetch_firmware(wl); 673 ret = wl1271_fetch_firmware(wl);
664 if (ret < 0) 674 if (ret < 0)
665 goto out_power_off; 675 goto out;
666 } 676 }
667 677
668 /* No NVS from netlink, try to get it from the filesystem */ 678 /* No NVS from netlink, try to get it from the filesystem */
669 if (wl->nvs == NULL) { 679 if (wl->nvs == NULL) {
670 ret = wl1271_fetch_nvs(wl); 680 ret = wl1271_fetch_nvs(wl);
671 if (ret < 0) 681 if (ret < 0)
672 goto out_power_off; 682 goto out;
673 } 683 }
674 684
675 goto out;
676
677out_power_off:
678 wl1271_power_off(wl);
679
680out: 685out:
681 return ret; 686 return ret;
682} 687}
683 688
684int wl1271_plt_start(struct wl1271 *wl) 689int wl1271_plt_start(struct wl1271 *wl)
685{ 690{
691 int retries = WL1271_BOOT_RETRIES;
686 int ret; 692 int ret;
687 693
688 mutex_lock(&wl->mutex); 694 mutex_lock(&wl->mutex);
@@ -696,35 +702,48 @@ int wl1271_plt_start(struct wl1271 *wl)
696 goto out; 702 goto out;
697 } 703 }
698 704
699 wl->state = WL1271_STATE_PLT; 705 while (retries) {
700 706 retries--;
701 ret = wl1271_chip_wakeup(wl); 707 ret = wl1271_chip_wakeup(wl);
702 if (ret < 0) 708 if (ret < 0)
703 goto out; 709 goto power_off;
704
705 ret = wl1271_boot(wl);
706 if (ret < 0)
707 goto out_power_off;
708
709 wl1271_notice("firmware booted in PLT mode (%s)", wl->chip.fw_ver);
710 710
711 ret = wl1271_plt_init(wl); 711 ret = wl1271_boot(wl);
712 if (ret < 0) 712 if (ret < 0)
713 goto out_irq_disable; 713 goto power_off;
714 714
715 /* Make sure power saving is disabled */ 715 ret = wl1271_plt_init(wl);
716 ret = wl1271_acx_sleep_auth(wl, WL1271_PSM_CAM); 716 if (ret < 0)
717 if (ret < 0) 717 goto irq_disable;
718 goto out_irq_disable;
719 718
720 goto out; 719 /* Make sure power saving is disabled */
720 ret = wl1271_acx_sleep_auth(wl, WL1271_PSM_CAM);
721 if (ret < 0)
722 goto irq_disable;
721 723
722out_irq_disable: 724 wl->state = WL1271_STATE_PLT;
723 wl1271_disable_interrupts(wl); 725 wl1271_notice("firmware booted in PLT mode (%s)",
726 wl->chip.fw_ver);
727 goto out;
724 728
725out_power_off: 729irq_disable:
726 wl1271_power_off(wl); 730 wl1271_disable_interrupts(wl);
731 mutex_unlock(&wl->mutex);
732 /* Unlocking the mutex in the middle of handling is
733 inherently unsafe. In this case we deem it safe to do,
734 because we need to let any possibly pending IRQ out of
735 the system (and while we are WL1271_STATE_OFF the IRQ
736 work function will not do anything.) Also, any other
737 possible concurrent operations will fail due to the
738 current state, hence the wl1271 struct should be safe. */
739 cancel_work_sync(&wl->irq_work);
740 mutex_lock(&wl->mutex);
741power_off:
742 wl1271_power_off(wl);
743 }
727 744
745 wl1271_error("firmware boot in PLT mode failed despite %d retries",
746 WL1271_BOOT_RETRIES);
728out: 747out:
729 mutex_unlock(&wl->mutex); 748 mutex_unlock(&wl->mutex);
730 749
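
The hunk above replaces the single boot attempt in wl1271_plt_start() with a bounded retry loop that powers the chip off between attempts and only reports failure after WL1271_BOOT_RETRIES tries. A minimal user-space sketch of the same retry-with-cleanup shape, with hypothetical chip_wakeup/chip_boot/chip_power_off stand-ins, not the driver's real calls:

#include <stdio.h>

#define BOOT_RETRIES 3

/* stand-ins for the real bring-up steps; all three names are hypothetical */
static int chip_wakeup(int attempt) { return attempt < 2 ? -1 : 0; }
static int chip_boot(void)          { return 0; }
static void chip_power_off(void)    { puts("power off before next attempt"); }

static int plt_start(void)
{
	int retries = BOOT_RETRIES;

	while (retries--) {
		if (chip_wakeup(BOOT_RETRIES - retries - 1) < 0)
			goto power_off;
		if (chip_boot() < 0)
			goto power_off;
		puts("booted");
		return 0;	/* success: leave the chip powered */
power_off:
		chip_power_off();
	}
	fprintf(stderr, "boot failed despite %d retries\n", BOOT_RETRIES);
	return -1;
}

int main(void)
{
	return plt_start() ? 1 : 0;
}

The key property carried over from the patch: the power-off is part of the loop body, so every failed attempt starts the next one from a powered-down chip.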
@@ -762,7 +781,20 @@ out:
762static int wl1271_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb) 781static int wl1271_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
763{ 782{
764 struct wl1271 *wl = hw->priv; 783 struct wl1271 *wl = hw->priv;
784 struct ieee80211_conf *conf = &hw->conf;
785 struct ieee80211_tx_info *txinfo = IEEE80211_SKB_CB(skb);
786 struct ieee80211_sta *sta = txinfo->control.sta;
787 unsigned long flags;
765 788
789 /* peek into the rates configured in the STA entry */
790 spin_lock_irqsave(&wl->wl_lock, flags);
791 if (sta && sta->supp_rates[conf->channel->band] != wl->sta_rate_set) {
792 wl->sta_rate_set = sta->supp_rates[conf->channel->band];
793 set_bit(WL1271_FLAG_STA_RATES_CHANGED, &wl->flags);
794 }
795 spin_unlock_irqrestore(&wl->wl_lock, flags);
796
797 /* queue the packet */
766 skb_queue_tail(&wl->tx_queue, skb); 798 skb_queue_tail(&wl->tx_queue, skb);
767 799
768 /* 800 /*
@@ -784,7 +816,7 @@ static int wl1271_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
784 * protected. Maybe fix this by removing the stupid 816 * protected. Maybe fix this by removing the stupid
785 * variable altogether and checking the real queue state? 817 * variable altogether and checking the real queue state?
786 */ 818 */
787 wl->tx_queue_stopped = true; 819 set_bit(WL1271_FLAG_TX_QUEUE_STOPPED, &wl->flags);
788 } 820 }
789 821
790 return NETDEV_TX_OK; 822 return NETDEV_TX_OK;
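
The op_tx hunk only peeks at the station's supported rates in the hot path: it records a snapshot plus a "rates changed" flag under a spinlock and leaves the heavier rate-policy reconfiguration to the TX work queued later in this patch. A small pthread-based sketch of that "snapshot under a lock, act on it later" pattern (all names here are illustrative):

#include <pthread.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static uint32_t sta_rate_set;		/* last rates seen in the TX path */
static bool rates_changed;		/* set in TX path, consumed in worker */

/* fast path: note the change, do not reconfigure here */
static void tx_path(uint32_t supp_rates)
{
	pthread_mutex_lock(&lock);
	if (supp_rates != sta_rate_set) {
		sta_rate_set = supp_rates;
		rates_changed = true;
	}
	pthread_mutex_unlock(&lock);
}

/* slow path: pick up the snapshot and apply it */
static void tx_work(void)
{
	uint32_t rates = 0;

	pthread_mutex_lock(&lock);
	if (rates_changed) {
		rates = sta_rate_set;
		rates_changed = false;
	}
	pthread_mutex_unlock(&lock);

	if (rates)
		printf("reconfiguring rate policy for 0x%x\n", (unsigned)rates);
}

int main(void)
{
	tx_path(0x0f);
	tx_work();	/* reconfigures */
	tx_work();	/* nothing to do */
	return 0;
}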
@@ -880,6 +912,7 @@ static struct notifier_block wl1271_dev_notifier = {
880static int wl1271_op_start(struct ieee80211_hw *hw) 912static int wl1271_op_start(struct ieee80211_hw *hw)
881{ 913{
882 struct wl1271 *wl = hw->priv; 914 struct wl1271 *wl = hw->priv;
915 int retries = WL1271_BOOT_RETRIES;
883 int ret = 0; 916 int ret = 0;
884 917
885 wl1271_debug(DEBUG_MAC80211, "mac80211 start"); 918 wl1271_debug(DEBUG_MAC80211, "mac80211 start");
@@ -893,30 +926,42 @@ static int wl1271_op_start(struct ieee80211_hw *hw)
893 goto out; 926 goto out;
894 } 927 }
895 928
896 ret = wl1271_chip_wakeup(wl); 929 while (retries) {
897 if (ret < 0) 930 retries--;
898 goto out; 931 ret = wl1271_chip_wakeup(wl);
899 932 if (ret < 0)
900 ret = wl1271_boot(wl); 933 goto power_off;
901 if (ret < 0)
902 goto out_power_off;
903
904 ret = wl1271_hw_init(wl);
905 if (ret < 0)
906 goto out_irq_disable;
907
908 wl->state = WL1271_STATE_ON;
909 934
910 wl1271_info("firmware booted (%s)", wl->chip.fw_ver); 935 ret = wl1271_boot(wl);
936 if (ret < 0)
937 goto power_off;
911 938
912 goto out; 939 ret = wl1271_hw_init(wl);
940 if (ret < 0)
941 goto irq_disable;
913 942
914out_irq_disable: 943 wl->state = WL1271_STATE_ON;
915 wl1271_disable_interrupts(wl); 944 wl1271_info("firmware booted (%s)", wl->chip.fw_ver);
945 goto out;
916 946
917out_power_off: 947irq_disable:
918 wl1271_power_off(wl); 948 wl1271_disable_interrupts(wl);
949 mutex_unlock(&wl->mutex);
950 /* Unlocking the mutex in the middle of handling is
951 inherently unsafe. In this case we deem it safe to do,
952 because we need to let any possibly pending IRQ out of
953 the system (and while we are WL1271_STATE_OFF the IRQ
954 work function will not do anything.) Also, any other
955 possible concurrent operations will fail due to the
956 current state, hence the wl1271 struct should be safe. */
957 cancel_work_sync(&wl->irq_work);
958 mutex_lock(&wl->mutex);
959power_off:
960 wl1271_power_off(wl);
961 }
919 962
963 wl1271_error("firmware boot failed despite %d retries",
964 WL1271_BOOT_RETRIES);
920out: 965out:
921 mutex_unlock(&wl->mutex); 966 mutex_unlock(&wl->mutex);
922 967
@@ -944,11 +989,10 @@ static void wl1271_op_stop(struct ieee80211_hw *hw)
944 989
945 WARN_ON(wl->state != WL1271_STATE_ON); 990 WARN_ON(wl->state != WL1271_STATE_ON);
946 991
947 if (wl->scanning) { 992 if (test_and_clear_bit(WL1271_FLAG_SCANNING, &wl->flags)) {
948 mutex_unlock(&wl->mutex); 993 mutex_unlock(&wl->mutex);
949 ieee80211_scan_completed(wl->hw, true); 994 ieee80211_scan_completed(wl->hw, true);
950 mutex_lock(&wl->mutex); 995 mutex_lock(&wl->mutex);
951 wl->scanning = false;
952 } 996 }
953 997
954 wl->state = WL1271_STATE_OFF; 998 wl->state = WL1271_STATE_OFF;
@@ -973,10 +1017,7 @@ static void wl1271_op_stop(struct ieee80211_hw *hw)
973 wl->band = IEEE80211_BAND_2GHZ; 1017 wl->band = IEEE80211_BAND_2GHZ;
974 1018
975 wl->rx_counter = 0; 1019 wl->rx_counter = 0;
976 wl->elp = false;
977 wl->psm = 0;
978 wl->psm_entry_retry = 0; 1020 wl->psm_entry_retry = 0;
979 wl->tx_queue_stopped = false;
980 wl->power_level = WL1271_DEFAULT_POWER_LEVEL; 1021 wl->power_level = WL1271_DEFAULT_POWER_LEVEL;
981 wl->tx_blocks_available = 0; 1022 wl->tx_blocks_available = 0;
982 wl->tx_results_count = 0; 1023 wl->tx_results_count = 0;
@@ -986,7 +1027,9 @@ static void wl1271_op_stop(struct ieee80211_hw *hw)
986 wl->tx_security_seq_32 = 0; 1027 wl->tx_security_seq_32 = 0;
987 wl->time_offset = 0; 1028 wl->time_offset = 0;
988 wl->session_counter = 0; 1029 wl->session_counter = 0;
989 wl->joined = false; 1030 wl->rate_set = CONF_TX_RATE_MASK_BASIC;
1031 wl->sta_rate_set = 0;
1032 wl->flags = 0;
990 1033
991 for (i = 0; i < NUM_TX_QUEUES; i++) 1034 for (i = 0; i < NUM_TX_QUEUES; i++)
992 wl->tx_blocks_freed[i] = 0; 1035 wl->tx_blocks_freed[i] = 0;
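
Throughout this patch, scattered booleans (elp, psm, joined, scanning, tx_queue_stopped, ...) are folded into a single wl->flags word manipulated with set_bit/test_bit, which is why op_stop above can reset them all with the single assignment wl->flags = 0. A standalone sketch of that flag-word style, with the bit numbers invented for illustration:

#include <stdio.h>

/* illustrative flag bits; the real driver defines its own enum */
enum {
	FLAG_JOINED,
	FLAG_PSM,
	FLAG_TX_QUEUE_STOPPED,
};

#define BIT(n)			(1UL << (n))
#define set_flag(f, w)		(*(w) |= BIT(f))
#define clear_flag(f, w)	(*(w) &= ~BIT(f))
#define test_flag(f, w)		(!!(*(w) & BIT(f)))

int main(void)
{
	unsigned long flags = 0;

	set_flag(FLAG_JOINED, &flags);
	set_flag(FLAG_PSM, &flags);
	printf("joined=%d psm=%d stopped=%d\n",
	       test_flag(FLAG_JOINED, &flags),
	       test_flag(FLAG_PSM, &flags),
	       test_flag(FLAG_TX_QUEUE_STOPPED, &flags));

	flags = 0;	/* op_stop-style reset: one assignment clears every flag */
	printf("after reset: joined=%d\n", test_flag(FLAG_JOINED, &flags));
	return 0;
}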
@@ -996,13 +1039,13 @@ static void wl1271_op_stop(struct ieee80211_hw *hw)
996} 1039}
997 1040
998static int wl1271_op_add_interface(struct ieee80211_hw *hw, 1041static int wl1271_op_add_interface(struct ieee80211_hw *hw,
999 struct ieee80211_if_init_conf *conf) 1042 struct ieee80211_vif *vif)
1000{ 1043{
1001 struct wl1271 *wl = hw->priv; 1044 struct wl1271 *wl = hw->priv;
1002 int ret = 0; 1045 int ret = 0;
1003 1046
1004 wl1271_debug(DEBUG_MAC80211, "mac80211 add interface type %d mac %pM", 1047 wl1271_debug(DEBUG_MAC80211, "mac80211 add interface type %d mac %pM",
1005 conf->type, conf->mac_addr); 1048 vif->type, vif->addr);
1006 1049
1007 mutex_lock(&wl->mutex); 1050 mutex_lock(&wl->mutex);
1008 if (wl->vif) { 1051 if (wl->vif) {
@@ -1010,9 +1053,9 @@ static int wl1271_op_add_interface(struct ieee80211_hw *hw,
1010 goto out; 1053 goto out;
1011 } 1054 }
1012 1055
1013 wl->vif = conf->vif; 1056 wl->vif = vif;
1014 1057
1015 switch (conf->type) { 1058 switch (vif->type) {
1016 case NL80211_IFTYPE_STATION: 1059 case NL80211_IFTYPE_STATION:
1017 wl->bss_type = BSS_TYPE_STA_BSS; 1060 wl->bss_type = BSS_TYPE_STA_BSS;
1018 break; 1061 break;
@@ -1032,7 +1075,7 @@ out:
1032} 1075}
1033 1076
1034static void wl1271_op_remove_interface(struct ieee80211_hw *hw, 1077static void wl1271_op_remove_interface(struct ieee80211_hw *hw,
1035 struct ieee80211_if_init_conf *conf) 1078 struct ieee80211_vif *vif)
1036{ 1079{
1037 struct wl1271 *wl = hw->priv; 1080 struct wl1271 *wl = hw->priv;
1038 1081
@@ -1109,6 +1152,51 @@ out:
1109} 1152}
1110#endif 1153#endif
1111 1154
1155static int wl1271_join_channel(struct wl1271 *wl, int channel)
1156{
1157 int ret = 0;
1158 /* we need to use a dummy BSSID for now */
1159 static const u8 dummy_bssid[ETH_ALEN] = { 0x0b, 0xad, 0xde,
1160 0xad, 0xbe, 0xef };
1161
1162 /* the dummy join is not required for ad-hoc */
1163 if (wl->bss_type == BSS_TYPE_IBSS)
1164 goto out;
1165
1166 /* disable mac filter, so we hear everything */
1167 wl->rx_config &= ~CFG_BSSID_FILTER_EN;
1168
1169 wl->channel = channel;
1170 memcpy(wl->bssid, dummy_bssid, ETH_ALEN);
1171
1172 ret = wl1271_cmd_join(wl);
1173 if (ret < 0)
1174 goto out;
1175
1176 set_bit(WL1271_FLAG_JOINED, &wl->flags);
1177
1178out:
1179 return ret;
1180}
1181
1182static int wl1271_unjoin_channel(struct wl1271 *wl)
1183{
1184 int ret;
1185
1186 /* to stop listening to a channel, we disconnect */
1187 ret = wl1271_cmd_disconnect(wl);
1188 if (ret < 0)
1189 goto out;
1190
1191 clear_bit(WL1271_FLAG_JOINED, &wl->flags);
1192 wl->channel = 0;
1193 memset(wl->bssid, 0, ETH_ALEN);
1194 wl->rx_config = WL1271_DEFAULT_RX_CONFIG;
1195
1196out:
1197 return ret;
1198}
1199
1112static int wl1271_op_config(struct ieee80211_hw *hw, u32 changed) 1200static int wl1271_op_config(struct ieee80211_hw *hw, u32 changed)
1113{ 1201{
1114 struct wl1271 *wl = hw->priv; 1202 struct wl1271 *wl = hw->priv;
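
wl1271_join_channel() added above issues a join on a dummy BSSID with the BSSID filter disabled so the chip can listen on a channel before a real association exists, and wl1271_unjoin_channel() undoes it. A toy user-space sketch that tracks only the host-side bookkeeping of that pair (the constant value and the struct are illustrative; the real driver also sends JOIN and disconnect commands to the firmware):

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

#define ETH_ALEN 6
#define CFG_BSSID_FILTER_EN 0x0400	/* illustrative bit value */

struct ctx {
	unsigned rx_config;
	unsigned char bssid[ETH_ALEN];
	int channel;
	bool joined;
};

static void join_channel(struct ctx *c, int channel)
{
	static const unsigned char dummy_bssid[ETH_ALEN] = {
		0x0b, 0xad, 0xde, 0xad, 0xbe, 0xef };

	c->rx_config &= ~CFG_BSSID_FILTER_EN;	/* hear everything */
	c->channel = channel;
	memcpy(c->bssid, dummy_bssid, ETH_ALEN);
	c->joined = true;			/* firmware JOIN would go here */
}

static void unjoin_channel(struct ctx *c)
{
	c->joined = false;			/* firmware disconnect would go here */
	c->channel = 0;
	memset(c->bssid, 0, ETH_ALEN);
	c->rx_config |= CFG_BSSID_FILTER_EN;	/* back to the default filter */
}

int main(void)
{
	struct ctx c = { .rx_config = CFG_BSSID_FILTER_EN };

	join_channel(&c, 6);
	printf("joined ch %d, filter %s\n", c.channel,
	       (c.rx_config & CFG_BSSID_FILTER_EN) ? "on" : "off");
	unjoin_channel(&c);
	printf("joined=%d\n", c.joined);
	return 0;
}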
@@ -1117,10 +1205,11 @@ static int wl1271_op_config(struct ieee80211_hw *hw, u32 changed)
1117 1205
1118 channel = ieee80211_frequency_to_channel(conf->channel->center_freq); 1206 channel = ieee80211_frequency_to_channel(conf->channel->center_freq);
1119 1207
1120 wl1271_debug(DEBUG_MAC80211, "mac80211 config ch %d psm %s power %d", 1208 wl1271_debug(DEBUG_MAC80211, "mac80211 config ch %d psm %s power %d %s",
1121 channel, 1209 channel,
1122 conf->flags & IEEE80211_CONF_PS ? "on" : "off", 1210 conf->flags & IEEE80211_CONF_PS ? "on" : "off",
1123 conf->power_level); 1211 conf->power_level,
1212 conf->flags & IEEE80211_CONF_IDLE ? "idle" : "in use");
1124 1213
1125 mutex_lock(&wl->mutex); 1214 mutex_lock(&wl->mutex);
1126 1215
@@ -1130,34 +1219,44 @@ static int wl1271_op_config(struct ieee80211_hw *hw, u32 changed)
1130 if (ret < 0) 1219 if (ret < 0)
1131 goto out; 1220 goto out;
1132 1221
1133 if (channel != wl->channel) { 1222 if (changed & IEEE80211_CONF_CHANGE_IDLE) {
1134 /* 1223 if (conf->flags & IEEE80211_CONF_IDLE &&
1135 * We assume that the stack will configure the right channel 1224 test_bit(WL1271_FLAG_JOINED, &wl->flags))
1136 * before associating, so we don't need to send a join 1225 wl1271_unjoin_channel(wl);
1137 * command here. We will join the right channel when the 1226 else if (!(conf->flags & IEEE80211_CONF_IDLE))
1138 * BSSID changes 1227 wl1271_join_channel(wl, channel);
1139 */ 1228
1140 wl->channel = channel; 1229 if (conf->flags & IEEE80211_CONF_IDLE) {
1230 wl->rate_set = CONF_TX_RATE_MASK_BASIC;
1231 wl->sta_rate_set = 0;
1232 wl1271_acx_rate_policies(wl);
1233 }
1141 } 1234 }
1142 1235
1143 if (conf->flags & IEEE80211_CONF_PS && !wl->psm_requested) { 1236 /* if the channel changes while joined, join again */
1144 wl1271_info("psm enabled"); 1237 if (channel != wl->channel && test_bit(WL1271_FLAG_JOINED, &wl->flags))
1238 wl1271_join_channel(wl, channel);
1145 1239
1146 wl->psm_requested = true; 1240 if (conf->flags & IEEE80211_CONF_PS &&
1241 !test_bit(WL1271_FLAG_PSM_REQUESTED, &wl->flags)) {
1242 set_bit(WL1271_FLAG_PSM_REQUESTED, &wl->flags);
1147 1243
1148 /* 1244 /*
1149 * We enter PSM only if we're already associated. 1245 * We enter PSM only if we're already associated.
1150 * If we're not, we'll enter it when joining an SSID, 1246 * If we're not, we'll enter it when joining an SSID,
1151 * through the bss_info_changed() hook. 1247 * through the bss_info_changed() hook.
1152 */ 1248 */
1153 ret = wl1271_ps_set_mode(wl, STATION_POWER_SAVE_MODE); 1249 if (test_bit(WL1271_FLAG_STA_ASSOCIATED, &wl->flags)) {
1250 wl1271_info("psm enabled");
1251 ret = wl1271_ps_set_mode(wl, STATION_POWER_SAVE_MODE);
1252 }
1154 } else if (!(conf->flags & IEEE80211_CONF_PS) && 1253 } else if (!(conf->flags & IEEE80211_CONF_PS) &&
1155 wl->psm_requested) { 1254 test_bit(WL1271_FLAG_PSM_REQUESTED, &wl->flags)) {
1156 wl1271_info("psm disabled"); 1255 wl1271_info("psm disabled");
1157 1256
1158 wl->psm_requested = false; 1257 clear_bit(WL1271_FLAG_PSM_REQUESTED, &wl->flags);
1159 1258
1160 if (wl->psm) 1259 if (test_bit(WL1271_FLAG_PSM, &wl->flags))
1161 ret = wl1271_ps_set_mode(wl, STATION_ACTIVE_MODE); 1260 ret = wl1271_ps_set_mode(wl, STATION_ACTIVE_MODE);
1162 } 1261 }
1163 1262
@@ -1440,22 +1539,6 @@ out:
1440 return ret; 1539 return ret;
1441} 1540}
1442 1541
1443static u32 wl1271_enabled_rates_get(struct wl1271 *wl, u64 basic_rate_set)
1444{
1445 struct ieee80211_supported_band *band;
1446 u32 enabled_rates = 0;
1447 int bit;
1448
1449 band = wl->hw->wiphy->bands[wl->band];
1450 for (bit = 0; bit < band->n_bitrates; bit++) {
1451 if (basic_rate_set & 0x1)
1452 enabled_rates |= band->bitrates[bit].hw_value;
1453 basic_rate_set >>= 1;
1454 }
1455
1456 return enabled_rates;
1457}
1458
1459static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw, 1542static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw,
1460 struct ieee80211_vif *vif, 1543 struct ieee80211_vif *vif,
1461 struct ieee80211_bss_conf *bss_conf, 1544 struct ieee80211_bss_conf *bss_conf,
@@ -1473,9 +1556,68 @@ static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw,
1473 if (ret < 0) 1556 if (ret < 0)
1474 goto out; 1557 goto out;
1475 1558
1559 if ((changed & BSS_CHANGED_BSSID) &&
1560 /*
1561 * Now we know the correct bssid, so we send a new join command
1562 * and enable the BSSID filter
1563 */
1564 memcmp(wl->bssid, bss_conf->bssid, ETH_ALEN)) {
1565 wl->rx_config |= CFG_BSSID_FILTER_EN;
1566 memcpy(wl->bssid, bss_conf->bssid, ETH_ALEN);
1567 ret = wl1271_cmd_build_null_data(wl);
1568 if (ret < 0) {
1569 wl1271_warning("cmd buld null data failed %d",
 1569 wl1271_warning("cmd build null data failed %d",
1570 ret);
1571 goto out_sleep;
1572 }
1573 ret = wl1271_cmd_join(wl);
1574 if (ret < 0) {
1575 wl1271_warning("cmd join failed %d", ret);
1576 goto out_sleep;
1577 }
1578 set_bit(WL1271_FLAG_JOINED, &wl->flags);
1579 }
1580
1581 if (wl->bss_type == BSS_TYPE_IBSS) {
1582 /* FIXME: This implements rudimentary ad-hoc support -
1583 proper templates are on the wish list and notification
1584 on when they change. This patch will update the templates
1585 on every call to this function. Also, the firmware will not
1586 answer to probe-requests as it does not have the proper
1587 SSID set in the JOIN command. The probe-response template
1588 is set nevertheless, as the FW will ASSERT without it */
1589 struct sk_buff *beacon = ieee80211_beacon_get(hw, vif);
1590
1591 if (beacon) {
1592 struct ieee80211_hdr *hdr;
1593 ret = wl1271_cmd_template_set(wl, CMD_TEMPL_BEACON,
1594 beacon->data,
1595 beacon->len);
1596
1597 if (ret < 0) {
1598 dev_kfree_skb(beacon);
1599 goto out_sleep;
1600 }
1601
1602 hdr = (struct ieee80211_hdr *) beacon->data;
1603 hdr->frame_control = cpu_to_le16(
1604 IEEE80211_FTYPE_MGMT |
1605 IEEE80211_STYPE_PROBE_RESP);
1606
1607 ret = wl1271_cmd_template_set(wl,
1608 CMD_TEMPL_PROBE_RESPONSE,
1609 beacon->data,
1610 beacon->len);
1611 dev_kfree_skb(beacon);
1612 if (ret < 0)
1613 goto out_sleep;
1614 }
1615 }
1616
1476 if (changed & BSS_CHANGED_ASSOC) { 1617 if (changed & BSS_CHANGED_ASSOC) {
1477 if (bss_conf->assoc) { 1618 if (bss_conf->assoc) {
1478 wl->aid = bss_conf->aid; 1619 wl->aid = bss_conf->aid;
1620 set_bit(WL1271_FLAG_STA_ASSOCIATED, &wl->flags);
1479 1621
1480 /* 1622 /*
1481 * with wl1271, we don't need to update the 1623 * with wl1271, we don't need to update the
@@ -1492,7 +1634,8 @@ static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw,
1492 goto out_sleep; 1634 goto out_sleep;
1493 1635
1494 /* If we want to go in PSM but we're not there yet */ 1636 /* If we want to go in PSM but we're not there yet */
1495 if (wl->psm_requested && !wl->psm) { 1637 if (test_bit(WL1271_FLAG_PSM_REQUESTED, &wl->flags) &&
1638 !test_bit(WL1271_FLAG_PSM, &wl->flags)) {
1496 mode = STATION_POWER_SAVE_MODE; 1639 mode = STATION_POWER_SAVE_MODE;
1497 ret = wl1271_ps_set_mode(wl, mode); 1640 ret = wl1271_ps_set_mode(wl, mode);
1498 if (ret < 0) 1641 if (ret < 0)
@@ -1500,7 +1643,7 @@ static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw,
1500 } 1643 }
1501 } else { 1644 } else {
1502 /* use defaults when not associated */ 1645 /* use defaults when not associated */
1503 wl->basic_rate_set = WL1271_DEFAULT_BASIC_RATE_SET; 1646 clear_bit(WL1271_FLAG_STA_ASSOCIATED, &wl->flags);
1504 wl->aid = 0; 1647 wl->aid = 0;
1505 } 1648 }
1506 1649
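
The IBSS branch above uploads the mac80211 beacon as the beacon template and then reuses the same buffer as a probe-response template after patching its frame-control field. A standalone sketch of that frame-control rewrite, using the standard 802.11 management type/subtype values and a little-endian store as the frame carries on the air:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define IEEE80211_FTYPE_MGMT		0x0000
#define IEEE80211_STYPE_BEACON		0x0080
#define IEEE80211_STYPE_PROBE_RESP	0x0050

/* store a 16-bit value little-endian, as it appears in the frame header */
static void put_le16(uint8_t *p, uint16_t v)
{
	p[0] = v & 0xff;
	p[1] = v >> 8;
}

int main(void)
{
	uint8_t frame[24];

	memset(frame, 0, sizeof(frame));
	put_le16(frame, IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_BEACON);
	printf("template 1: fc=%02x%02x (beacon)\n", frame[1], frame[0]);

	/* reuse the same buffer as a probe-response template */
	put_le16(frame, IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_PROBE_RESP);
	printf("template 2: fc=%02x%02x (probe resp)\n", frame[1], frame[0]);
	return 0;
}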
@@ -1535,17 +1678,6 @@ static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw,
1535 } 1678 }
1536 } 1679 }
1537 1680
1538 if (changed & BSS_CHANGED_BASIC_RATES) {
1539 wl->basic_rate_set = wl1271_enabled_rates_get(
1540 wl, bss_conf->basic_rates);
1541
1542 ret = wl1271_acx_rate_policies(wl, wl->basic_rate_set);
1543 if (ret < 0) {
1544 wl1271_warning("Set rate policies failed %d", ret);
1545 goto out_sleep;
1546 }
1547 }
1548
1549out_sleep: 1681out_sleep:
1550 wl1271_ps_elp_sleep(wl); 1682 wl1271_ps_elp_sleep(wl);
1551 1683
@@ -1599,19 +1731,19 @@ static struct ieee80211_rate wl1271_rates[] = {
1599 1731
1600/* can't be const, mac80211 writes to this */ 1732/* can't be const, mac80211 writes to this */
1601static struct ieee80211_channel wl1271_channels[] = { 1733static struct ieee80211_channel wl1271_channels[] = {
1602 { .hw_value = 1, .center_freq = 2412}, 1734 { .hw_value = 1, .center_freq = 2412, .max_power = 25 },
1603 { .hw_value = 2, .center_freq = 2417}, 1735 { .hw_value = 2, .center_freq = 2417, .max_power = 25 },
1604 { .hw_value = 3, .center_freq = 2422}, 1736 { .hw_value = 3, .center_freq = 2422, .max_power = 25 },
1605 { .hw_value = 4, .center_freq = 2427}, 1737 { .hw_value = 4, .center_freq = 2427, .max_power = 25 },
1606 { .hw_value = 5, .center_freq = 2432}, 1738 { .hw_value = 5, .center_freq = 2432, .max_power = 25 },
1607 { .hw_value = 6, .center_freq = 2437}, 1739 { .hw_value = 6, .center_freq = 2437, .max_power = 25 },
1608 { .hw_value = 7, .center_freq = 2442}, 1740 { .hw_value = 7, .center_freq = 2442, .max_power = 25 },
1609 { .hw_value = 8, .center_freq = 2447}, 1741 { .hw_value = 8, .center_freq = 2447, .max_power = 25 },
1610 { .hw_value = 9, .center_freq = 2452}, 1742 { .hw_value = 9, .center_freq = 2452, .max_power = 25 },
1611 { .hw_value = 10, .center_freq = 2457}, 1743 { .hw_value = 10, .center_freq = 2457, .max_power = 25 },
1612 { .hw_value = 11, .center_freq = 2462}, 1744 { .hw_value = 11, .center_freq = 2462, .max_power = 25 },
1613 { .hw_value = 12, .center_freq = 2467}, 1745 { .hw_value = 12, .center_freq = 2467, .max_power = 25 },
1614 { .hw_value = 13, .center_freq = 2472}, 1746 { .hw_value = 13, .center_freq = 2472, .max_power = 25 },
1615}; 1747};
1616 1748
1617/* can't be const, mac80211 writes to this */ 1749/* can't be const, mac80211 writes to this */
@@ -1757,7 +1889,8 @@ static int wl1271_init_ieee80211(struct wl1271 *wl)
1757 IEEE80211_HW_BEACON_FILTER | 1889 IEEE80211_HW_BEACON_FILTER |
1758 IEEE80211_HW_SUPPORTS_PS; 1890 IEEE80211_HW_SUPPORTS_PS;
1759 1891
1760 wl->hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION); 1892 wl->hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
1893 BIT(NL80211_IFTYPE_ADHOC);
1761 wl->hw->wiphy->max_scan_ssids = 1; 1894 wl->hw->wiphy->max_scan_ssids = 1;
1762 wl->hw->wiphy->bands[IEEE80211_BAND_2GHZ] = &wl1271_band_2ghz; 1895 wl->hw->wiphy->bands[IEEE80211_BAND_2GHZ] = &wl1271_band_2ghz;
1763 1896
@@ -1818,21 +1951,18 @@ static int __devinit wl1271_probe(struct spi_device *spi)
1818 1951
1819 INIT_DELAYED_WORK(&wl->elp_work, wl1271_elp_work); 1952 INIT_DELAYED_WORK(&wl->elp_work, wl1271_elp_work);
1820 wl->channel = WL1271_DEFAULT_CHANNEL; 1953 wl->channel = WL1271_DEFAULT_CHANNEL;
1821 wl->scanning = false;
1822 wl->default_key = 0; 1954 wl->default_key = 0;
1823 wl->rx_counter = 0; 1955 wl->rx_counter = 0;
1824 wl->rx_config = WL1271_DEFAULT_RX_CONFIG; 1956 wl->rx_config = WL1271_DEFAULT_RX_CONFIG;
1825 wl->rx_filter = WL1271_DEFAULT_RX_FILTER; 1957 wl->rx_filter = WL1271_DEFAULT_RX_FILTER;
1826 wl->elp = false;
1827 wl->psm = 0;
1828 wl->psm_requested = false;
1829 wl->psm_entry_retry = 0; 1958 wl->psm_entry_retry = 0;
1830 wl->tx_queue_stopped = false;
1831 wl->power_level = WL1271_DEFAULT_POWER_LEVEL; 1959 wl->power_level = WL1271_DEFAULT_POWER_LEVEL;
1832 wl->basic_rate_set = WL1271_DEFAULT_BASIC_RATE_SET; 1960 wl->basic_rate_set = CONF_TX_RATE_MASK_BASIC;
1961 wl->rate_set = CONF_TX_RATE_MASK_BASIC;
1962 wl->sta_rate_set = 0;
1833 wl->band = IEEE80211_BAND_2GHZ; 1963 wl->band = IEEE80211_BAND_2GHZ;
1834 wl->vif = NULL; 1964 wl->vif = NULL;
1835 wl->joined = false; 1965 wl->flags = 0;
1836 1966
1837 for (i = 0; i < ACX_TX_DESCRIPTORS; i++) 1967 for (i = 0; i < ACX_TX_DESCRIPTORS; i++)
1838 wl->tx_frames[i] = NULL; 1968 wl->tx_frames[i] = NULL;
diff --git a/drivers/net/wireless/wl12xx/wl1271_ps.c b/drivers/net/wireless/wl12xx/wl1271_ps.c
index 507cd91d7ee..e407790f677 100644
--- a/drivers/net/wireless/wl12xx/wl1271_ps.c
+++ b/drivers/net/wireless/wl12xx/wl1271_ps.c
@@ -39,12 +39,13 @@ void wl1271_elp_work(struct work_struct *work)
39 39
40 mutex_lock(&wl->mutex); 40 mutex_lock(&wl->mutex);
41 41
42 if (wl->elp || !wl->psm) 42 if (test_bit(WL1271_FLAG_IN_ELP, &wl->flags) ||
43 !test_bit(WL1271_FLAG_PSM, &wl->flags))
43 goto out; 44 goto out;
44 45
45 wl1271_debug(DEBUG_PSM, "chip to elp"); 46 wl1271_debug(DEBUG_PSM, "chip to elp");
46 wl1271_raw_write32(wl, HW_ACCESS_ELP_CTRL_REG_ADDR, ELPCTRL_SLEEP); 47 wl1271_raw_write32(wl, HW_ACCESS_ELP_CTRL_REG_ADDR, ELPCTRL_SLEEP);
47 wl->elp = true; 48 set_bit(WL1271_FLAG_IN_ELP, &wl->flags);
48 49
49out: 50out:
50 mutex_unlock(&wl->mutex); 51 mutex_unlock(&wl->mutex);
@@ -55,7 +56,7 @@ out:
55/* Routines to toggle sleep mode while in ELP */ 56/* Routines to toggle sleep mode while in ELP */
56void wl1271_ps_elp_sleep(struct wl1271 *wl) 57void wl1271_ps_elp_sleep(struct wl1271 *wl)
57{ 58{
58 if (wl->psm) { 59 if (test_bit(WL1271_FLAG_PSM, &wl->flags)) {
59 cancel_delayed_work(&wl->elp_work); 60 cancel_delayed_work(&wl->elp_work);
60 ieee80211_queue_delayed_work(wl->hw, &wl->elp_work, 61 ieee80211_queue_delayed_work(wl->hw, &wl->elp_work,
61 msecs_to_jiffies(ELP_ENTRY_DELAY)); 62 msecs_to_jiffies(ELP_ENTRY_DELAY));
@@ -70,7 +71,7 @@ int wl1271_ps_elp_wakeup(struct wl1271 *wl, bool chip_awake)
70 u32 start_time = jiffies; 71 u32 start_time = jiffies;
71 bool pending = false; 72 bool pending = false;
72 73
73 if (!wl->elp) 74 if (!test_bit(WL1271_FLAG_IN_ELP, &wl->flags))
74 return 0; 75 return 0;
75 76
76 wl1271_debug(DEBUG_PSM, "waking up chip from elp"); 77 wl1271_debug(DEBUG_PSM, "waking up chip from elp");
@@ -101,7 +102,7 @@ int wl1271_ps_elp_wakeup(struct wl1271 *wl, bool chip_awake)
101 } 102 }
102 } 103 }
103 104
104 wl->elp = false; 105 clear_bit(WL1271_FLAG_IN_ELP, &wl->flags);
105 106
106 wl1271_debug(DEBUG_PSM, "wakeup time: %u ms", 107 wl1271_debug(DEBUG_PSM, "wakeup time: %u ms",
107 jiffies_to_msecs(jiffies - start_time)); 108 jiffies_to_msecs(jiffies - start_time));
@@ -143,7 +144,7 @@ int wl1271_ps_set_mode(struct wl1271 *wl, enum wl1271_cmd_ps_mode mode)
143 if (ret < 0) 144 if (ret < 0)
144 return ret; 145 return ret;
145 146
146 wl->psm = 1; 147 set_bit(WL1271_FLAG_PSM, &wl->flags);
147 break; 148 break;
148 case STATION_ACTIVE_MODE: 149 case STATION_ACTIVE_MODE:
149 default: 150 default:
@@ -166,7 +167,7 @@ int wl1271_ps_set_mode(struct wl1271 *wl, enum wl1271_cmd_ps_mode mode)
166 if (ret < 0) 167 if (ret < 0)
167 return ret; 168 return ret;
168 169
169 wl->psm = 0; 170 clear_bit(WL1271_FLAG_PSM, &wl->flags);
170 break; 171 break;
171 } 172 }
172 173
diff --git a/drivers/net/wireless/wl12xx/wl1271_reg.h b/drivers/net/wireless/wl12xx/wl1271_reg.h
index 1f237389d1c..99096077152 100644
--- a/drivers/net/wireless/wl12xx/wl1271_reg.h
+++ b/drivers/net/wireless/wl12xx/wl1271_reg.h
@@ -62,73 +62,10 @@
62#define WL1271_SLV_REG_DATA (REGISTERS_BASE + 0x0008) 62#define WL1271_SLV_REG_DATA (REGISTERS_BASE + 0x0008)
63#define WL1271_SLV_REG_ADATA (REGISTERS_BASE + 0x000c) 63#define WL1271_SLV_REG_ADATA (REGISTERS_BASE + 0x000c)
64#define WL1271_SLV_MEM_DATA (REGISTERS_BASE + 0x0018) 64#define WL1271_SLV_MEM_DATA (REGISTERS_BASE + 0x0018)
65/*
66 * Interrupt registers.
67 * 64 bit interrupt sources registers ws ced.
68 * sme interupts were removed and new ones were added.
69 * Order was changed.
70 */
71#define FIQ_MASK (REGISTERS_BASE + 0x0400)
72#define FIQ_MASK_L (REGISTERS_BASE + 0x0400)
73#define FIQ_MASK_H (REGISTERS_BASE + 0x0404)
74#define FIQ_MASK_SET (REGISTERS_BASE + 0x0408)
75#define FIQ_MASK_SET_L (REGISTERS_BASE + 0x0408)
76#define FIQ_MASK_SET_H (REGISTERS_BASE + 0x040C)
77#define FIQ_MASK_CLR (REGISTERS_BASE + 0x0410)
78#define FIQ_MASK_CLR_L (REGISTERS_BASE + 0x0410)
79#define FIQ_MASK_CLR_H (REGISTERS_BASE + 0x0414)
80#define IRQ_MASK (REGISTERS_BASE + 0x0418)
81#define IRQ_MASK_L (REGISTERS_BASE + 0x0418)
82#define IRQ_MASK_H (REGISTERS_BASE + 0x041C)
83#define IRQ_MASK_SET (REGISTERS_BASE + 0x0420)
84#define IRQ_MASK_SET_L (REGISTERS_BASE + 0x0420)
85#define IRQ_MASK_SET_H (REGISTERS_BASE + 0x0424)
86#define IRQ_MASK_CLR (REGISTERS_BASE + 0x0428)
87#define IRQ_MASK_CLR_L (REGISTERS_BASE + 0x0428)
88#define IRQ_MASK_CLR_H (REGISTERS_BASE + 0x042C)
89#define ECPU_MASK (REGISTERS_BASE + 0x0448)
90#define FIQ_STS_L (REGISTERS_BASE + 0x044C)
91#define FIQ_STS_H (REGISTERS_BASE + 0x0450)
92#define IRQ_STS_L (REGISTERS_BASE + 0x0454)
93#define IRQ_STS_H (REGISTERS_BASE + 0x0458)
94#define INT_STS_ND (REGISTERS_BASE + 0x0464)
95#define INT_STS_RAW_L (REGISTERS_BASE + 0x0464)
96#define INT_STS_RAW_H (REGISTERS_BASE + 0x0468)
97#define INT_STS_CLR (REGISTERS_BASE + 0x04B4)
98#define INT_STS_CLR_L (REGISTERS_BASE + 0x04B4)
99#define INT_STS_CLR_H (REGISTERS_BASE + 0x04B8)
100#define INT_ACK (REGISTERS_BASE + 0x046C)
101#define INT_ACK_L (REGISTERS_BASE + 0x046C)
102#define INT_ACK_H (REGISTERS_BASE + 0x0470)
103#define INT_TRIG (REGISTERS_BASE + 0x0474)
104#define INT_TRIG_L (REGISTERS_BASE + 0x0474)
105#define INT_TRIG_H (REGISTERS_BASE + 0x0478)
106#define HOST_STS_L (REGISTERS_BASE + 0x045C)
107#define HOST_STS_H (REGISTERS_BASE + 0x0460)
108#define HOST_MASK (REGISTERS_BASE + 0x0430)
109#define HOST_MASK_L (REGISTERS_BASE + 0x0430)
110#define HOST_MASK_H (REGISTERS_BASE + 0x0434)
111#define HOST_MASK_SET (REGISTERS_BASE + 0x0438)
112#define HOST_MASK_SET_L (REGISTERS_BASE + 0x0438)
113#define HOST_MASK_SET_H (REGISTERS_BASE + 0x043C)
114#define HOST_MASK_CLR (REGISTERS_BASE + 0x0440)
115#define HOST_MASK_CLR_L (REGISTERS_BASE + 0x0440)
116#define HOST_MASK_CLR_H (REGISTERS_BASE + 0x0444)
117 65
118#define ACX_REG_INTERRUPT_TRIG (REGISTERS_BASE + 0x0474) 66#define ACX_REG_INTERRUPT_TRIG (REGISTERS_BASE + 0x0474)
119#define ACX_REG_INTERRUPT_TRIG_H (REGISTERS_BASE + 0x0478) 67#define ACX_REG_INTERRUPT_TRIG_H (REGISTERS_BASE + 0x0478)
120 68
121/* Host Interrupts*/
122#define HINT_MASK (REGISTERS_BASE + 0x0494)
123#define HINT_MASK_SET (REGISTERS_BASE + 0x0498)
124#define HINT_MASK_CLR (REGISTERS_BASE + 0x049C)
125#define HINT_STS_ND_MASKED (REGISTERS_BASE + 0x04A0)
126/*1150 spec calls this HINT_STS_RAW*/
127#define HINT_STS_ND (REGISTERS_BASE + 0x04B0)
128#define HINT_STS_CLR (REGISTERS_BASE + 0x04A4)
129#define HINT_ACK (REGISTERS_BASE + 0x04A8)
130#define HINT_TRIG (REGISTERS_BASE + 0x04AC)
131
132/*============================================= 69/*=============================================
133 Host Interrupt Mask Register - 32bit (RW) 70 Host Interrupt Mask Register - 32bit (RW)
134 ------------------------------------------ 71 ------------------------------------------
@@ -433,16 +370,6 @@
433 370
434 371
435/*=============================================== 372/*===============================================
436 Phy regs
437 ===============================================*/
438#define ACX_PHY_ADDR_REG SBB_ADDR
439#define ACX_PHY_DATA_REG SBB_DATA
440#define ACX_PHY_CTRL_REG SBB_CTL
441#define ACX_PHY_REG_WR_MASK 0x00000001ul
442#define ACX_PHY_REG_RD_MASK 0x00000002ul
443
444
445/*===============================================
446 EEPROM Read/Write Request 32bit RW 373 EEPROM Read/Write Request 32bit RW
447 ------------------------------------------ 374 ------------------------------------------
448 1 EE_READ - EEPROM Read Request 1 - Setting this bit 375 1 EE_READ - EEPROM Read Request 1 - Setting this bit
@@ -511,28 +438,6 @@
511#define ACX_CONT_WIND_MIN_MASK 0x0000007f 438#define ACX_CONT_WIND_MIN_MASK 0x0000007f
512#define ACX_CONT_WIND_MAX 0x03ff0000 439#define ACX_CONT_WIND_MAX 0x03ff0000
513 440
514/*
515 * Indirect slave register/memory registers
516 * ----------------------------------------
517 */
518#define HW_SLAVE_REG_ADDR_REG 0x00000004
519#define HW_SLAVE_REG_DATA_REG 0x00000008
520#define HW_SLAVE_REG_CTRL_REG 0x0000000c
521
522#define SLAVE_AUTO_INC 0x00010000
523#define SLAVE_NO_AUTO_INC 0x00000000
524#define SLAVE_HOST_LITTLE_ENDIAN 0x00000000
525
526#define HW_SLAVE_MEM_ADDR_REG SLV_MEM_ADDR
527#define HW_SLAVE_MEM_DATA_REG SLV_MEM_DATA
528#define HW_SLAVE_MEM_CTRL_REG SLV_MEM_CTL
529#define HW_SLAVE_MEM_ENDIAN_REG SLV_END_CTL
530
531#define HW_FUNC_EVENT_INT_EN 0x8000
532#define HW_FUNC_EVENT_MASK_REG 0x00000034
533
534#define ACX_MAC_TIMESTAMP_REG (MAC_TIMESTAMP)
535
536/*=============================================== 441/*===============================================
537 HI_CFG Interface Configuration Register Values 442 HI_CFG Interface Configuration Register Values
538 ------------------------------------------ 443 ------------------------------------------
@@ -647,10 +552,6 @@ b12-b0 - Supported Rate indicator bits as defined below.
647******************************************************************************/ 552******************************************************************************/
648 553
649 554
650#define TNETW1251_CHIP_ID_PG1_0 0x07010101
651#define TNETW1251_CHIP_ID_PG1_1 0x07020101
652#define TNETW1251_CHIP_ID_PG1_2 0x07030101
653
654/************************************************************************* 555/*************************************************************************
655 556
656 Interrupt Trigger Register (Host -> WiLink) 557 Interrupt Trigger Register (Host -> WiLink)
diff --git a/drivers/net/wireless/wl12xx/wl1271_spi.c b/drivers/net/wireless/wl12xx/wl1271_spi.c
index 02978a16e73..ee9564aa6ec 100644
--- a/drivers/net/wireless/wl12xx/wl1271_spi.c
+++ b/drivers/net/wireless/wl12xx/wl1271_spi.c
@@ -397,8 +397,7 @@ u16 wl1271_top_reg_read(struct wl1271 *wl, int addr)
397 /* poll for data ready */ 397 /* poll for data ready */
398 do { 398 do {
399 val = wl1271_spi_read32(wl, OCP_DATA_READ); 399 val = wl1271_spi_read32(wl, OCP_DATA_READ);
400 timeout--; 400 } while (!(val & OCP_READY_MASK) && --timeout);
401 } while (!(val & OCP_READY_MASK) && timeout);
402 401
403 if (!timeout) { 402 if (!timeout) {
404 wl1271_warning("Top register access timed out."); 403 wl1271_warning("Top register access timed out.");
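
The one-line change to wl1271_top_reg_read() folds the decrement into the loop condition, so timeout is no longer decremented on the iteration where the ready bit is finally seen; with the old code, a read that succeeded on the last allowed attempt could still trip the "timed out" warning. A small standalone sketch of the corrected polling shape (the register read is simulated):

#include <stdio.h>

#define READY_MASK	0x1
#define POLL_TRIES	5

/* simulate a register that becomes ready on the last allowed attempt */
static unsigned read_reg(void)
{
	static int calls;
	return ++calls >= POLL_TRIES ? READY_MASK : 0;
}

int main(void)
{
	int timeout = POLL_TRIES;
	unsigned val;

	do {
		val = read_reg();
	} while (!(val & READY_MASK) && --timeout);
	/* when the ready bit is set, --timeout is never evaluated, so a
	 * success on the final attempt keeps timeout non-zero */

	if (!timeout)
		fprintf(stderr, "register access timed out\n");
	else
		printf("ready after %d attempt(s)\n", POLL_TRIES - timeout + 1);
	return 0;
}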
diff --git a/drivers/net/wireless/wl12xx/wl1271_tx.c b/drivers/net/wireless/wl12xx/wl1271_tx.c
index 00af065c77c..a288cc317d7 100644
--- a/drivers/net/wireless/wl12xx/wl1271_tx.c
+++ b/drivers/net/wireless/wl12xx/wl1271_tx.c
@@ -121,6 +121,11 @@ static int wl1271_tx_fill_hdr(struct wl1271 *wl, struct sk_buff *skb,
121 pad = pad - skb->len; 121 pad = pad - skb->len;
122 tx_attr |= pad << TX_HW_ATTR_OFST_LAST_WORD_PAD; 122 tx_attr |= pad << TX_HW_ATTR_OFST_LAST_WORD_PAD;
123 123
124 /* if the packets are destined for AP (have a STA entry) send them
125 with AP rate policies, otherwise use default basic rates */
126 if (control->control.sta)
127 tx_attr |= ACX_TX_AP_FULL_RATE << TX_HW_ATTR_OFST_RATE_POLICY;
128
124 desc->tx_attr = cpu_to_le16(tx_attr); 129 desc->tx_attr = cpu_to_le16(tx_attr);
125 130
126 wl1271_debug(DEBUG_TX, "tx_fill_hdr: pad: %d", pad); 131 wl1271_debug(DEBUG_TX, "tx_fill_hdr: pad: %d", pad);
@@ -214,18 +219,50 @@ static int wl1271_tx_frame(struct wl1271 *wl, struct sk_buff *skb)
214 return ret; 219 return ret;
215} 220}
216 221
222static u32 wl1271_tx_enabled_rates_get(struct wl1271 *wl, u32 rate_set)
223{
224 struct ieee80211_supported_band *band;
225 u32 enabled_rates = 0;
226 int bit;
227
228 band = wl->hw->wiphy->bands[wl->band];
229 for (bit = 0; bit < band->n_bitrates; bit++) {
230 if (rate_set & 0x1)
231 enabled_rates |= band->bitrates[bit].hw_value;
232 rate_set >>= 1;
233 }
234
235 return enabled_rates;
236}
237
217void wl1271_tx_work(struct work_struct *work) 238void wl1271_tx_work(struct work_struct *work)
218{ 239{
219 struct wl1271 *wl = container_of(work, struct wl1271, tx_work); 240 struct wl1271 *wl = container_of(work, struct wl1271, tx_work);
220 struct sk_buff *skb; 241 struct sk_buff *skb;
221 bool woken_up = false; 242 bool woken_up = false;
243 u32 sta_rates = 0;
222 int ret; 244 int ret;
223 245
246 /* check if the rates supported by the AP have changed */
247 if (unlikely(test_and_clear_bit(WL1271_FLAG_STA_RATES_CHANGED,
248 &wl->flags))) {
249 unsigned long flags;
250 spin_lock_irqsave(&wl->wl_lock, flags);
251 sta_rates = wl->sta_rate_set;
252 spin_unlock_irqrestore(&wl->wl_lock, flags);
253 }
254
224 mutex_lock(&wl->mutex); 255 mutex_lock(&wl->mutex);
225 256
226 if (unlikely(wl->state == WL1271_STATE_OFF)) 257 if (unlikely(wl->state == WL1271_STATE_OFF))
227 goto out; 258 goto out;
228 259
260 /* if rates have changed, re-configure the rate policy */
261 if (unlikely(sta_rates)) {
262 wl->rate_set = wl1271_tx_enabled_rates_get(wl, sta_rates);
263 wl1271_acx_rate_policies(wl);
264 }
265
229 while ((skb = skb_dequeue(&wl->tx_queue))) { 266 while ((skb = skb_dequeue(&wl->tx_queue))) {
230 if (!woken_up) { 267 if (!woken_up) {
231 ret = wl1271_ps_elp_wakeup(wl, false); 268 ret = wl1271_ps_elp_wakeup(wl, false);
@@ -240,18 +277,18 @@ void wl1271_tx_work(struct work_struct *work)
240 wl1271_debug(DEBUG_TX, "tx_work: fw buffer full, " 277 wl1271_debug(DEBUG_TX, "tx_work: fw buffer full, "
241 "stop queues"); 278 "stop queues");
242 ieee80211_stop_queues(wl->hw); 279 ieee80211_stop_queues(wl->hw);
243 wl->tx_queue_stopped = true; 280 set_bit(WL1271_FLAG_TX_QUEUE_STOPPED, &wl->flags);
244 skb_queue_head(&wl->tx_queue, skb); 281 skb_queue_head(&wl->tx_queue, skb);
245 goto out; 282 goto out;
246 } else if (ret < 0) { 283 } else if (ret < 0) {
247 dev_kfree_skb(skb); 284 dev_kfree_skb(skb);
248 goto out; 285 goto out;
249 } else if (wl->tx_queue_stopped) { 286 } else if (test_and_clear_bit(WL1271_FLAG_TX_QUEUE_STOPPED,
287 &wl->flags)) {
250 /* firmware buffer has space, restart queues */ 288 /* firmware buffer has space, restart queues */
251 wl1271_debug(DEBUG_TX, 289 wl1271_debug(DEBUG_TX,
252 "complete_packet: waking queues"); 290 "complete_packet: waking queues");
253 ieee80211_wake_queues(wl->hw); 291 ieee80211_wake_queues(wl->hw);
254 wl->tx_queue_stopped = false;
255 } 292 }
256 } 293 }
257 294
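
wl1271_tx_enabled_rates_get(), moved into the TX path above, walks the mac80211 rate bitmap bit by bit and ORs together the firmware hw_value of every enabled rate. A self-contained sketch of that translation with a made-up rate table (the hw values are illustrative, not the chip's real ones):

#include <stdint.h>
#include <stdio.h>

/* illustrative table: index = bitrate index, value = firmware hw value */
static const uint32_t hw_value[] = {
	0x0001,	/* 1 Mb/s   */
	0x0002,	/* 2 Mb/s   */
	0x0004,	/* 5.5 Mb/s */
	0x0008,	/* 11 Mb/s  */
};

static uint32_t enabled_rates_get(uint32_t rate_set)
{
	uint32_t enabled = 0;
	unsigned bit;

	for (bit = 0; bit < sizeof(hw_value) / sizeof(hw_value[0]); bit++) {
		if (rate_set & 0x1)
			enabled |= hw_value[bit];
		rate_set >>= 1;
	}
	return enabled;
}

int main(void)
{
	/* bits 0 and 3 set: 1 Mb/s and 11 Mb/s enabled */
	printf("fw rate mask: 0x%x\n", (unsigned)enabled_rates_get(0x9));
	return 0;
}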
diff --git a/drivers/net/wireless/zd1211rw/zd_mac.c b/drivers/net/wireless/zd1211rw/zd_mac.c
index f14deb0c851..2d555cc3050 100644
--- a/drivers/net/wireless/zd1211rw/zd_mac.c
+++ b/drivers/net/wireless/zd1211rw/zd_mac.c
@@ -869,7 +869,7 @@ int zd_mac_rx(struct ieee80211_hw *hw, const u8 *buffer, unsigned int length)
869} 869}
870 870
871static int zd_op_add_interface(struct ieee80211_hw *hw, 871static int zd_op_add_interface(struct ieee80211_hw *hw,
872 struct ieee80211_if_init_conf *conf) 872 struct ieee80211_vif *vif)
873{ 873{
874 struct zd_mac *mac = zd_hw_mac(hw); 874 struct zd_mac *mac = zd_hw_mac(hw);
875 875
@@ -877,22 +877,22 @@ static int zd_op_add_interface(struct ieee80211_hw *hw,
877 if (mac->type != NL80211_IFTYPE_UNSPECIFIED) 877 if (mac->type != NL80211_IFTYPE_UNSPECIFIED)
878 return -EOPNOTSUPP; 878 return -EOPNOTSUPP;
879 879
880 switch (conf->type) { 880 switch (vif->type) {
881 case NL80211_IFTYPE_MONITOR: 881 case NL80211_IFTYPE_MONITOR:
882 case NL80211_IFTYPE_MESH_POINT: 882 case NL80211_IFTYPE_MESH_POINT:
883 case NL80211_IFTYPE_STATION: 883 case NL80211_IFTYPE_STATION:
884 case NL80211_IFTYPE_ADHOC: 884 case NL80211_IFTYPE_ADHOC:
885 mac->type = conf->type; 885 mac->type = vif->type;
886 break; 886 break;
887 default: 887 default:
888 return -EOPNOTSUPP; 888 return -EOPNOTSUPP;
889 } 889 }
890 890
891 return zd_write_mac_addr(&mac->chip, conf->mac_addr); 891 return zd_write_mac_addr(&mac->chip, vif->addr);
892} 892}
893 893
894static void zd_op_remove_interface(struct ieee80211_hw *hw, 894static void zd_op_remove_interface(struct ieee80211_hw *hw,
895 struct ieee80211_if_init_conf *conf) 895 struct ieee80211_vif *vif)
896{ 896{
897 struct zd_mac *mac = zd_hw_mac(hw); 897 struct zd_mac *mac = zd_hw_mac(hw);
898 mac->type = NL80211_IFTYPE_UNSPECIFIED; 898 mac->type = NL80211_IFTYPE_UNSPECIFIED;
diff --git a/drivers/net/wireless/zd1211rw/zd_usb.c b/drivers/net/wireless/zd1211rw/zd_usb.c
index 72d3e437e19..442fc111732 100644
--- a/drivers/net/wireless/zd1211rw/zd_usb.c
+++ b/drivers/net/wireless/zd1211rw/zd_usb.c
@@ -1079,11 +1079,15 @@ static int eject_installer(struct usb_interface *intf)
1079 int r; 1079 int r;
1080 1080
1081 /* Find bulk out endpoint */ 1081 /* Find bulk out endpoint */
1082 endpoint = &iface_desc->endpoint[1].desc; 1082 for (r = 1; r >= 0; r--) {
1083 if (usb_endpoint_dir_out(endpoint) && 1083 endpoint = &iface_desc->endpoint[r].desc;
1084 usb_endpoint_xfer_bulk(endpoint)) { 1084 if (usb_endpoint_dir_out(endpoint) &&
1085 bulk_out_ep = endpoint->bEndpointAddress; 1085 usb_endpoint_xfer_bulk(endpoint)) {
1086 } else { 1086 bulk_out_ep = endpoint->bEndpointAddress;
1087 break;
1088 }
1089 }
1090 if (r == -1) {
1087 dev_err(&udev->dev, 1091 dev_err(&udev->dev,
1088 "zd1211rw: Could not find bulk out endpoint\n"); 1092 "zd1211rw: Could not find bulk out endpoint\n");
1089 return -ENODEV; 1093 return -ENODEV;
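
Instead of assuming the bulk-out endpoint always sits at descriptor index 1, the eject_installer() fix above walks the interface's endpoints (index 1 down to 0) and keeps the first bulk OUT endpoint, failing only when neither matches. A standalone sketch of the same scan-and-validate shape with simplified descriptor structs:

#include <stdbool.h>
#include <stdio.h>

struct endpoint {
	bool is_out;
	bool is_bulk;
	unsigned char address;
};

/* return the endpoint address, or -1 when no bulk-out endpoint exists */
static int find_bulk_out(const struct endpoint *ep, int count)
{
	int i;

	for (i = count - 1; i >= 0; i--)
		if (ep[i].is_out && ep[i].is_bulk)
			return ep[i].address;
	return -1;
}

int main(void)
{
	const struct endpoint eps[2] = {
		{ .is_out = true,  .is_bulk = true, .address = 0x02 },
		{ .is_out = false, .is_bulk = true, .address = 0x81 },
	};
	int addr = find_bulk_out(eps, 2);

	if (addr < 0)
		fprintf(stderr, "could not find bulk out endpoint\n");
	else
		printf("bulk out endpoint 0x%02x\n", addr);
	return 0;
}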
diff --git a/drivers/net/xilinx_emaclite.c b/drivers/net/xilinx_emaclite.c
index 8c777ba4e2b..f7fe1aa03b4 100644
--- a/drivers/net/xilinx_emaclite.c
+++ b/drivers/net/xilinx_emaclite.c
@@ -925,11 +925,7 @@ static int __devinit xemaclite_of_probe(struct of_device *ofdev,
925 /* Set the MAC address in the EmacLite device */ 925 /* Set the MAC address in the EmacLite device */
926 xemaclite_set_mac_address(lp, ndev->dev_addr); 926 xemaclite_set_mac_address(lp, ndev->dev_addr);
927 927
928 dev_info(dev, 928 dev_info(dev, "MAC address is now %pM\n", ndev->dev_addr);
929 "MAC address is now %2x:%2x:%2x:%2x:%2x:%2x\n",
930 ndev->dev_addr[0], ndev->dev_addr[1],
931 ndev->dev_addr[2], ndev->dev_addr[3],
932 ndev->dev_addr[4], ndev->dev_addr[5]);
933 929
934 ndev->netdev_ops = &xemaclite_netdev_ops; 930 ndev->netdev_ops = &xemaclite_netdev_ops;
935 ndev->flags &= ~IFF_MULTICAST; 931 ndev->flags &= ~IFF_MULTICAST;
diff --git a/drivers/net/yellowfin.c b/drivers/net/yellowfin.c
index 0f773a9a3ff..8b231b30fd1 100644
--- a/drivers/net/yellowfin.c
+++ b/drivers/net/yellowfin.c
@@ -237,7 +237,7 @@ static const struct pci_id_info pci_id_tbl[] = {
237 { } 237 { }
238}; 238};
239 239
240static const struct pci_device_id yellowfin_pci_tbl[] = { 240static DEFINE_PCI_DEVICE_TABLE(yellowfin_pci_tbl) = {
241 { 0x1000, 0x0702, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, 241 { 0x1000, 0x0702, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
242 { 0x1000, 0x0701, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1 }, 242 { 0x1000, 0x0701, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1 },
243 { } 243 { }
diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h
index b232693378c..a3ac4456e0b 100644
--- a/drivers/s390/net/qeth_core.h
+++ b/drivers/s390/net/qeth_core.h
@@ -649,6 +649,7 @@ struct qeth_card_options {
649 int performance_stats; 649 int performance_stats;
650 int rx_sg_cb; 650 int rx_sg_cb;
651 enum qeth_ipa_isolation_modes isolation; 651 enum qeth_ipa_isolation_modes isolation;
652 int sniffer;
652}; 653};
653 654
654/* 655/*
@@ -737,6 +738,7 @@ struct qeth_card {
737 struct qeth_discipline discipline; 738 struct qeth_discipline discipline;
738 atomic_t force_alloc_skb; 739 atomic_t force_alloc_skb;
739 struct service_level qeth_service_level; 740 struct service_level qeth_service_level;
741 struct qdio_ssqd_desc ssqd;
740}; 742};
741 743
742struct qeth_card_list_struct { 744struct qeth_card_list_struct {
@@ -811,7 +813,8 @@ int qeth_send_ipa_cmd(struct qeth_card *, struct qeth_cmd_buffer *,
811struct qeth_cmd_buffer *qeth_get_ipacmd_buffer(struct qeth_card *, 813struct qeth_cmd_buffer *qeth_get_ipacmd_buffer(struct qeth_card *,
812 enum qeth_ipa_cmds, enum qeth_prot_versions); 814 enum qeth_ipa_cmds, enum qeth_prot_versions);
813int qeth_query_setadapterparms(struct qeth_card *); 815int qeth_query_setadapterparms(struct qeth_card *);
814int qeth_check_qdio_errors(struct qdio_buffer *, unsigned int, const char *); 816int qeth_check_qdio_errors(struct qeth_card *, struct qdio_buffer *,
817 unsigned int, const char *);
815void qeth_queue_input_buffer(struct qeth_card *, int); 818void qeth_queue_input_buffer(struct qeth_card *, int);
816struct sk_buff *qeth_core_get_next_skb(struct qeth_card *, 819struct sk_buff *qeth_core_get_next_skb(struct qeth_card *,
817 struct qdio_buffer *, struct qdio_buffer_element **, int *, 820 struct qdio_buffer *, struct qdio_buffer_element **, int *,
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index d34804d5ece..fa8a519218a 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -269,6 +269,7 @@ int qeth_realloc_buffer_pool(struct qeth_card *card, int bufcnt)
269 card->qdio.init_pool.buf_count = bufcnt; 269 card->qdio.init_pool.buf_count = bufcnt;
270 return qeth_alloc_buffer_pool(card); 270 return qeth_alloc_buffer_pool(card);
271} 271}
272EXPORT_SYMBOL_GPL(qeth_realloc_buffer_pool);
272 273
273static int qeth_issue_next_read(struct qeth_card *card) 274static int qeth_issue_next_read(struct qeth_card *card)
274{ 275{
@@ -350,8 +351,10 @@ static struct qeth_ipa_cmd *qeth_check_ipa_data(struct qeth_card *card,
350 if (IS_IPA(iob->data)) { 351 if (IS_IPA(iob->data)) {
351 cmd = (struct qeth_ipa_cmd *) PDU_ENCAPSULATION(iob->data); 352 cmd = (struct qeth_ipa_cmd *) PDU_ENCAPSULATION(iob->data);
352 if (IS_IPA_REPLY(cmd)) { 353 if (IS_IPA_REPLY(cmd)) {
353 if (cmd->hdr.command < IPA_CMD_SETCCID || 354 if (cmd->hdr.command != IPA_CMD_SETCCID &&
354 cmd->hdr.command > IPA_CMD_MODCCID) 355 cmd->hdr.command != IPA_CMD_DELCCID &&
356 cmd->hdr.command != IPA_CMD_MODCCID &&
357 cmd->hdr.command != IPA_CMD_SET_DIAG_ASS)
355 qeth_issue_ipa_msg(cmd, 358 qeth_issue_ipa_msg(cmd,
356 cmd->hdr.return_code, card); 359 cmd->hdr.return_code, card);
357 return cmd; 360 return cmd;
@@ -1100,11 +1103,6 @@ static int qeth_setup_card(struct qeth_card *card)
1100 card->thread_running_mask = 0; 1103 card->thread_running_mask = 0;
1101 INIT_WORK(&card->kernel_thread_starter, qeth_start_kernel_thread); 1104 INIT_WORK(&card->kernel_thread_starter, qeth_start_kernel_thread);
1102 INIT_LIST_HEAD(&card->ip_list); 1105 INIT_LIST_HEAD(&card->ip_list);
1103 card->ip_tbd_list = kmalloc(sizeof(struct list_head), GFP_KERNEL);
1104 if (!card->ip_tbd_list) {
1105 QETH_DBF_TEXT(SETUP, 0, "iptbdnom");
1106 return -ENOMEM;
1107 }
1108 INIT_LIST_HEAD(card->ip_tbd_list); 1106 INIT_LIST_HEAD(card->ip_tbd_list);
1109 INIT_LIST_HEAD(&card->cmd_waiter_list); 1107 INIT_LIST_HEAD(&card->cmd_waiter_list);
1110 init_waitqueue_head(&card->wait_q); 1108 init_waitqueue_head(&card->wait_q);
@@ -1138,21 +1136,30 @@ static struct qeth_card *qeth_alloc_card(void)
1138 QETH_DBF_TEXT(SETUP, 2, "alloccrd"); 1136 QETH_DBF_TEXT(SETUP, 2, "alloccrd");
1139 card = kzalloc(sizeof(struct qeth_card), GFP_DMA|GFP_KERNEL); 1137 card = kzalloc(sizeof(struct qeth_card), GFP_DMA|GFP_KERNEL);
1140 if (!card) 1138 if (!card)
1141 return NULL; 1139 goto out;
1142 QETH_DBF_HEX(SETUP, 2, &card, sizeof(void *)); 1140 QETH_DBF_HEX(SETUP, 2, &card, sizeof(void *));
1143 if (qeth_setup_channel(&card->read)) { 1141 card->ip_tbd_list = kmalloc(sizeof(struct list_head), GFP_KERNEL);
1144 kfree(card); 1142 if (!card->ip_tbd_list) {
1145 return NULL; 1143 QETH_DBF_TEXT(SETUP, 0, "iptbdnom");
1146 } 1144 goto out_card;
1147 if (qeth_setup_channel(&card->write)) {
1148 qeth_clean_channel(&card->read);
1149 kfree(card);
1150 return NULL;
1151 } 1145 }
1146 if (qeth_setup_channel(&card->read))
1147 goto out_ip;
1148 if (qeth_setup_channel(&card->write))
1149 goto out_channel;
1152 card->options.layer2 = -1; 1150 card->options.layer2 = -1;
1153 card->qeth_service_level.seq_print = qeth_core_sl_print; 1151 card->qeth_service_level.seq_print = qeth_core_sl_print;
1154 register_service_level(&card->qeth_service_level); 1152 register_service_level(&card->qeth_service_level);
1155 return card; 1153 return card;
1154
1155out_channel:
1156 qeth_clean_channel(&card->read);
1157out_ip:
1158 kfree(card->ip_tbd_list);
1159out_card:
1160 kfree(card);
1161out:
1162 return NULL;
1156} 1163}
1157 1164
1158static int qeth_determine_card_type(struct qeth_card *card) 1165static int qeth_determine_card_type(struct qeth_card *card)
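
qeth_alloc_card() is reworked above so every allocation failure unwinds through one chain of labels instead of duplicating cleanup calls at each early return. A generic user-space sketch of that goto-based unwinding (the resource names are placeholders, not the qeth structures):

#include <stdio.h>
#include <stdlib.h>

struct card {
	void *ip_list;
	void *read_channel;
	void *write_channel;
};

static struct card *alloc_card(void)
{
	struct card *card = calloc(1, sizeof(*card));

	if (!card)
		goto out;
	card->ip_list = malloc(64);
	if (!card->ip_list)
		goto out_card;
	card->read_channel = malloc(64);
	if (!card->read_channel)
		goto out_ip;
	card->write_channel = malloc(64);
	if (!card->write_channel)
		goto out_read;
	return card;

out_read:			/* unwind in reverse order of acquisition */
	free(card->read_channel);
out_ip:
	free(card->ip_list);
out_card:
	free(card);
out:
	return NULL;
}

int main(void)
{
	struct card *card = alloc_card();

	if (!card) {
		fprintf(stderr, "allocation failed\n");
		return 1;
	}
	printf("allocation succeeded\n");
	free(card->write_channel);
	free(card->read_channel);
	free(card->ip_list);
	free(card);
	return 0;
}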
@@ -1355,26 +1362,29 @@ static int qeth_read_conf_data(struct qeth_card *card, void **buffer,
1355 return ret; 1362 return ret;
1356} 1363}
1357 1364
1358static int qeth_get_unitaddr(struct qeth_card *card) 1365static void qeth_configure_unitaddr(struct qeth_card *card, char *prcd)
1359{ 1366{
1360 int length; 1367 QETH_DBF_TEXT(SETUP, 2, "cfgunit");
1361 char *prcd;
1362 int rc;
1363
1364 QETH_DBF_TEXT(SETUP, 2, "getunit");
1365 rc = qeth_read_conf_data(card, (void **) &prcd, &length);
1366 if (rc) {
1367 QETH_DBF_MESSAGE(2, "%s qeth_read_conf_data returned %i\n",
1368 dev_name(&card->gdev->dev), rc);
1369 return rc;
1370 }
1371 card->info.chpid = prcd[30]; 1368 card->info.chpid = prcd[30];
1372 card->info.unit_addr2 = prcd[31]; 1369 card->info.unit_addr2 = prcd[31];
1373 card->info.cula = prcd[63]; 1370 card->info.cula = prcd[63];
1374 card->info.guestlan = ((prcd[0x10] == _ascebc['V']) && 1371 card->info.guestlan = ((prcd[0x10] == _ascebc['V']) &&
1375 (prcd[0x11] == _ascebc['M'])); 1372 (prcd[0x11] == _ascebc['M']));
1376 kfree(prcd); 1373}
1377 return 0; 1374
1375static void qeth_configure_blkt_default(struct qeth_card *card, char *prcd)
1376{
1377 QETH_DBF_TEXT(SETUP, 2, "cfgblkt");
1378
1379 if (prcd[74] == 0xF0 && prcd[75] == 0xF0 && prcd[76] == 0xF5) {
1380 card->info.blkt.time_total = 250;
1381 card->info.blkt.inter_packet = 5;
1382 card->info.blkt.inter_packet_jumbo = 15;
1383 } else {
1384 card->info.blkt.time_total = 0;
1385 card->info.blkt.inter_packet = 0;
1386 card->info.blkt.inter_packet_jumbo = 0;
1387 }
1378} 1388}
1379 1389
1380static void qeth_init_tokens(struct qeth_card *card) 1390static void qeth_init_tokens(struct qeth_card *card)
@@ -2573,8 +2583,8 @@ int qeth_query_setadapterparms(struct qeth_card *card)
2573} 2583}
2574EXPORT_SYMBOL_GPL(qeth_query_setadapterparms); 2584EXPORT_SYMBOL_GPL(qeth_query_setadapterparms);
2575 2585
2576int qeth_check_qdio_errors(struct qdio_buffer *buf, unsigned int qdio_error, 2586int qeth_check_qdio_errors(struct qeth_card *card, struct qdio_buffer *buf,
2577 const char *dbftext) 2587 unsigned int qdio_error, const char *dbftext)
2578{ 2588{
2579 if (qdio_error) { 2589 if (qdio_error) {
2580 QETH_DBF_TEXT(TRACE, 2, dbftext); 2590 QETH_DBF_TEXT(TRACE, 2, dbftext);
@@ -2584,7 +2594,11 @@ int qeth_check_qdio_errors(struct qdio_buffer *buf, unsigned int qdio_error,
2584 QETH_DBF_TEXT_(QERR, 2, " F14=%02X", 2594 QETH_DBF_TEXT_(QERR, 2, " F14=%02X",
2585 buf->element[14].flags & 0xff); 2595 buf->element[14].flags & 0xff);
2586 QETH_DBF_TEXT_(QERR, 2, " qerr=%X", qdio_error); 2596 QETH_DBF_TEXT_(QERR, 2, " qerr=%X", qdio_error);
2587 return 1; 2597 if ((buf->element[15].flags & 0xff) == 0x12) {
2598 card->stats.rx_dropped++;
2599 return 0;
2600 } else
2601 return 1;
2588 } 2602 }
2589 return 0; 2603 return 0;
2590} 2604}
@@ -2667,7 +2681,7 @@ static int qeth_handle_send_error(struct qeth_card *card,
2667 qdio_err = 1; 2681 qdio_err = 1;
2668 } 2682 }
2669 } 2683 }
2670 qeth_check_qdio_errors(buffer->buffer, qdio_err, "qouterr"); 2684 qeth_check_qdio_errors(card, buffer->buffer, qdio_err, "qouterr");
2671 2685
2672 if (!qdio_err) 2686 if (!qdio_err)
2673 return QETH_SEND_ERROR_NONE; 2687 return QETH_SEND_ERROR_NONE;
@@ -3509,6 +3523,7 @@ void qeth_tx_timeout(struct net_device *dev)
3509{ 3523{
3510 struct qeth_card *card; 3524 struct qeth_card *card;
3511 3525
3526 QETH_DBF_TEXT(TRACE, 4, "txtimeo");
3512 card = dev->ml_priv; 3527 card = dev->ml_priv;
3513 card->stats.tx_errors++; 3528 card->stats.tx_errors++;
3514 qeth_schedule_recovery(card); 3529 qeth_schedule_recovery(card);
@@ -3847,9 +3862,7 @@ static int qeth_core_driver_group(const char *buf, struct device *root_dev,
3847 3862
3848int qeth_core_hardsetup_card(struct qeth_card *card) 3863int qeth_core_hardsetup_card(struct qeth_card *card)
3849{ 3864{
3850 struct qdio_ssqd_desc *ssqd;
3851 int retries = 0; 3865 int retries = 0;
3852 int mpno = 0;
3853 int rc; 3866 int rc;
3854 3867
3855 QETH_DBF_TEXT(SETUP, 2, "hrdsetup"); 3868 QETH_DBF_TEXT(SETUP, 2, "hrdsetup");
@@ -3882,31 +3895,6 @@ retriable:
3882 else 3895 else
3883 goto retry; 3896 goto retry;
3884 } 3897 }
3885
3886 rc = qeth_get_unitaddr(card);
3887 if (rc) {
3888 QETH_DBF_TEXT_(SETUP, 2, "2err%d", rc);
3889 return rc;
3890 }
3891
3892 ssqd = kmalloc(sizeof(struct qdio_ssqd_desc), GFP_KERNEL);
3893 if (!ssqd) {
3894 rc = -ENOMEM;
3895 goto out;
3896 }
3897 rc = qdio_get_ssqd_desc(CARD_DDEV(card), ssqd);
3898 if (rc == 0)
3899 mpno = ssqd->pcnt;
3900 kfree(ssqd);
3901
3902 if (mpno)
3903 mpno = min(mpno - 1, QETH_MAX_PORTNO);
3904 if (card->info.portno > mpno) {
3905 QETH_DBF_MESSAGE(2, "Device %s does not offer port number %d"
3906 "\n.", CARD_BUS_ID(card), card->info.portno);
3907 rc = -ENODEV;
3908 goto out;
3909 }
3910 qeth_init_tokens(card); 3898 qeth_init_tokens(card);
3911 qeth_init_func_level(card); 3899 qeth_init_func_level(card);
3912 rc = qeth_idx_activate_channel(&card->read, qeth_idx_read_cb); 3900 rc = qeth_idx_activate_channel(&card->read, qeth_idx_read_cb);
@@ -3990,7 +3978,7 @@ struct sk_buff *qeth_core_get_next_skb(struct qeth_card *card,
3990 struct qdio_buffer_element *element = *__element; 3978 struct qdio_buffer_element *element = *__element;
3991 int offset = *__offset; 3979 int offset = *__offset;
3992 struct sk_buff *skb = NULL; 3980 struct sk_buff *skb = NULL;
3993 int skb_len; 3981 int skb_len = 0;
3994 void *data_ptr; 3982 void *data_ptr;
3995 int data_len; 3983 int data_len;
3996 int headroom = 0; 3984 int headroom = 0;
@@ -4009,20 +3997,24 @@ struct sk_buff *qeth_core_get_next_skb(struct qeth_card *card,
4009 *hdr = element->addr + offset; 3997 *hdr = element->addr + offset;
4010 3998
4011 offset += sizeof(struct qeth_hdr); 3999 offset += sizeof(struct qeth_hdr);
4012 if (card->options.layer2) { 4000 switch ((*hdr)->hdr.l2.id) {
4013 if (card->info.type == QETH_CARD_TYPE_OSN) { 4001 case QETH_HEADER_TYPE_LAYER2:
4014 skb_len = (*hdr)->hdr.osn.pdu_length; 4002 skb_len = (*hdr)->hdr.l2.pkt_length;
4015 headroom = sizeof(struct qeth_hdr); 4003 break;
4016 } else { 4004 case QETH_HEADER_TYPE_LAYER3:
4017 skb_len = (*hdr)->hdr.l2.pkt_length;
4018 }
4019 } else {
4020 skb_len = (*hdr)->hdr.l3.length; 4005 skb_len = (*hdr)->hdr.l3.length;
4021 if ((card->info.link_type == QETH_LINK_TYPE_LANE_TR) || 4006 if ((card->info.link_type == QETH_LINK_TYPE_LANE_TR) ||
4022 (card->info.link_type == QETH_LINK_TYPE_HSTR)) 4007 (card->info.link_type == QETH_LINK_TYPE_HSTR))
4023 headroom = TR_HLEN; 4008 headroom = TR_HLEN;
4024 else 4009 else
4025 headroom = ETH_HLEN; 4010 headroom = ETH_HLEN;
4011 break;
4012 case QETH_HEADER_TYPE_OSN:
4013 skb_len = (*hdr)->hdr.osn.pdu_length;
4014 headroom = sizeof(struct qeth_hdr);
4015 break;
4016 default:
4017 break;
4026 } 4018 }
4027 4019
4028 if (!skb_len) 4020 if (!skb_len)
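
The receive path now derives skb length and headroom from the header type carried in the qeth header itself ((*hdr)->hdr.l2.id) rather than from the card's layer2 option, letting layer-2, layer-3 and OSN frames share one inbound path. A compact sketch of that dispatch, with the ids and geometry values chosen for illustration only:

#include <stdio.h>

enum hdr_type { HDR_LAYER2 = 1, HDR_LAYER3 = 2, HDR_OSN = 3 };

struct rx_hdr {
	enum hdr_type id;
	unsigned pkt_length;	/* layer 2 / OSN */
	unsigned l3_length;	/* layer 3 */
};

static int frame_geometry(const struct rx_hdr *hdr,
			  unsigned *skb_len, unsigned *headroom)
{
	*skb_len = 0;
	*headroom = 0;

	switch (hdr->id) {
	case HDR_LAYER2:
		*skb_len = hdr->pkt_length;
		break;
	case HDR_LAYER3:
		*skb_len = hdr->l3_length;
		*headroom = 14;		/* room for an Ethernet header */
		break;
	case HDR_OSN:
		*skb_len = hdr->pkt_length;
		*headroom = sizeof(*hdr);
		break;
	default:
		break;
	}
	return *skb_len ? 0 : -1;	/* unknown type or empty frame: drop */
}

int main(void)
{
	struct rx_hdr hdr = { .id = HDR_LAYER3, .l3_length = 1500 };
	unsigned len, room;

	if (!frame_geometry(&hdr, &len, &room))
		printf("len=%u headroom=%u\n", len, room);
	return 0;
}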
@@ -4177,6 +4169,41 @@ void qeth_core_free_discipline(struct qeth_card *card)
4177 card->discipline.ccwgdriver = NULL; 4169 card->discipline.ccwgdriver = NULL;
4178} 4170}
4179 4171
4172static void qeth_determine_capabilities(struct qeth_card *card)
4173{
4174 int rc;
4175 int length;
4176 char *prcd;
4177
4178 QETH_DBF_TEXT(SETUP, 2, "detcapab");
4179 rc = ccw_device_set_online(CARD_DDEV(card));
4180 if (rc) {
4181 QETH_DBF_TEXT_(SETUP, 2, "3err%d", rc);
4182 goto out;
4183 }
4184
4185
4186 rc = qeth_read_conf_data(card, (void **) &prcd, &length);
4187 if (rc) {
4188 QETH_DBF_MESSAGE(2, "%s qeth_read_conf_data returned %i\n",
4189 dev_name(&card->gdev->dev), rc);
4190 QETH_DBF_TEXT_(SETUP, 2, "5err%d", rc);
4191 goto out_offline;
4192 }
4193 qeth_configure_unitaddr(card, prcd);
4194 qeth_configure_blkt_default(card, prcd);
4195 kfree(prcd);
4196
4197 rc = qdio_get_ssqd_desc(CARD_DDEV(card), &card->ssqd);
4198 if (rc)
4199 QETH_DBF_TEXT_(SETUP, 2, "6err%d", rc);
4200
4201out_offline:
4202 ccw_device_set_offline(CARD_DDEV(card));
4203out:
4204 return;
4205}
4206
4180static int qeth_core_probe_device(struct ccwgroup_device *gdev) 4207static int qeth_core_probe_device(struct ccwgroup_device *gdev)
4181{ 4208{
4182 struct qeth_card *card; 4209 struct qeth_card *card;
@@ -4242,6 +4269,8 @@ static int qeth_core_probe_device(struct ccwgroup_device *gdev)
4242 write_lock_irqsave(&qeth_core_card_list.rwlock, flags); 4269 write_lock_irqsave(&qeth_core_card_list.rwlock, flags);
4243 list_add_tail(&card->list, &qeth_core_card_list.list); 4270 list_add_tail(&card->list, &qeth_core_card_list.list);
4244 write_unlock_irqrestore(&qeth_core_card_list.rwlock, flags); 4271 write_unlock_irqrestore(&qeth_core_card_list.rwlock, flags);
4272
4273 qeth_determine_capabilities(card);
4245 return 0; 4274 return 0;
4246 4275
4247err_card: 4276err_card:
diff --git a/drivers/s390/net/qeth_core_mpc.h b/drivers/s390/net/qeth_core_mpc.h
index 1ba51152f66..104a3351e02 100644
--- a/drivers/s390/net/qeth_core_mpc.h
+++ b/drivers/s390/net/qeth_core_mpc.h
@@ -156,6 +156,8 @@ enum qeth_ipa_return_codes {
156 IPA_RC_IP_TABLE_FULL = 0x0002, 156 IPA_RC_IP_TABLE_FULL = 0x0002,
157 IPA_RC_UNKNOWN_ERROR = 0x0003, 157 IPA_RC_UNKNOWN_ERROR = 0x0003,
158 IPA_RC_UNSUPPORTED_COMMAND = 0x0004, 158 IPA_RC_UNSUPPORTED_COMMAND = 0x0004,
159 IPA_RC_TRACE_ALREADY_ACTIVE = 0x0005,
160 IPA_RC_INVALID_FORMAT = 0x0006,
159 IPA_RC_DUP_IPV6_REMOTE = 0x0008, 161 IPA_RC_DUP_IPV6_REMOTE = 0x0008,
160 IPA_RC_DUP_IPV6_HOME = 0x0010, 162 IPA_RC_DUP_IPV6_HOME = 0x0010,
161 IPA_RC_UNREGISTERED_ADDR = 0x0011, 163 IPA_RC_UNREGISTERED_ADDR = 0x0011,
@@ -196,6 +198,11 @@ enum qeth_ipa_return_codes {
196 IPA_RC_INVALID_IP_VERSION2 = 0xf001, 198 IPA_RC_INVALID_IP_VERSION2 = 0xf001,
197 IPA_RC_FFFF = 0xffff 199 IPA_RC_FFFF = 0xffff
198}; 200};
201/* for DELIP */
202#define IPA_RC_IP_ADDRESS_NOT_DEFINED IPA_RC_PRIMARY_ALREADY_DEFINED
203/* for SET_DIAGNOSTIC_ASSIST */
204#define IPA_RC_INVALID_SUBCMD IPA_RC_IP_TABLE_FULL
205#define IPA_RC_HARDWARE_AUTH_ERROR IPA_RC_UNKNOWN_ERROR
199 206
200/* IPA function flags; each flag marks availability of respective function */ 207/* IPA function flags; each flag marks availability of respective function */
201enum qeth_ipa_funcs { 208enum qeth_ipa_funcs {
@@ -246,6 +253,7 @@ enum qeth_ipa_setadp_cmd {
246 IPA_SETADP_SET_SNMP_CONTROL = 0x00000200L, 253 IPA_SETADP_SET_SNMP_CONTROL = 0x00000200L,
247 IPA_SETADP_QUERY_CARD_INFO = 0x00000400L, 254 IPA_SETADP_QUERY_CARD_INFO = 0x00000400L,
248 IPA_SETADP_SET_PROMISC_MODE = 0x00000800L, 255 IPA_SETADP_SET_PROMISC_MODE = 0x00000800L,
256 IPA_SETADP_SET_DIAG_ASSIST = 0x00002000L,
249 IPA_SETADP_SET_ACCESS_CONTROL = 0x00010000L, 257 IPA_SETADP_SET_ACCESS_CONTROL = 0x00010000L,
250}; 258};
251enum qeth_ipa_mac_ops { 259enum qeth_ipa_mac_ops {
@@ -424,6 +432,40 @@ struct qeth_create_destroy_address {
424 __u8 unique_id[8]; 432 __u8 unique_id[8];
425} __attribute__ ((packed)); 433} __attribute__ ((packed));
426 434
435/* SET DIAGNOSTIC ASSIST IPA Command: *************************************/
436
437enum qeth_diags_cmds {
438 QETH_DIAGS_CMD_QUERY = 0x0001,
439 QETH_DIAGS_CMD_TRAP = 0x0002,
440 QETH_DIAGS_CMD_TRACE = 0x0004,
441 QETH_DIAGS_CMD_NOLOG = 0x0008,
442 QETH_DIAGS_CMD_DUMP = 0x0010,
443};
444
445enum qeth_diags_trace_types {
446 QETH_DIAGS_TYPE_HIPERSOCKET = 0x02,
447};
448
449enum qeth_diags_trace_cmds {
450 QETH_DIAGS_CMD_TRACE_ENABLE = 0x0001,
451 QETH_DIAGS_CMD_TRACE_DISABLE = 0x0002,
452 QETH_DIAGS_CMD_TRACE_MODIFY = 0x0004,
453 QETH_DIAGS_CMD_TRACE_REPLACE = 0x0008,
454 QETH_DIAGS_CMD_TRACE_QUERY = 0x0010,
455};
456
457struct qeth_ipacmd_diagass {
458 __u32 host_tod2;
459 __u32:32;
460 __u16 subcmd_len;
461 __u16:16;
462 __u32 subcmd;
463 __u8 type;
464 __u8 action;
465 __u16 options;
466 __u32:32;
467} __attribute__ ((packed));
468
427/* Header for each IPA command */ 469/* Header for each IPA command */
428struct qeth_ipacmd_hdr { 470struct qeth_ipacmd_hdr {
429 __u8 command; 471 __u8 command;
@@ -452,6 +494,7 @@ struct qeth_ipa_cmd {
452 struct qeth_create_destroy_address create_destroy_addr; 494 struct qeth_create_destroy_address create_destroy_addr;
453 struct qeth_ipacmd_setadpparms setadapterparms; 495 struct qeth_ipacmd_setadpparms setadapterparms;
454 struct qeth_set_routing setrtg; 496 struct qeth_set_routing setrtg;
497 struct qeth_ipacmd_diagass diagass;
455 } data; 498 } data;
456} __attribute__ ((packed)); 499} __attribute__ ((packed));
457 500
@@ -469,7 +512,6 @@ enum qeth_ipa_arp_return_codes {
469 QETH_IPA_ARP_RC_Q_NO_DATA = 0x0008, 512 QETH_IPA_ARP_RC_Q_NO_DATA = 0x0008,
470}; 513};
471 514
472
473extern char *qeth_get_ipa_msg(enum qeth_ipa_return_codes rc); 515extern char *qeth_get_ipa_msg(enum qeth_ipa_return_codes rc);
474extern char *qeth_get_ipa_cmd_name(enum qeth_ipa_cmds cmd); 516extern char *qeth_get_ipa_cmd_name(enum qeth_ipa_cmds cmd);
475 517
diff --git a/drivers/s390/net/qeth_core_sys.c b/drivers/s390/net/qeth_core_sys.c
index 9ff2b36fdc4..88ae4357136 100644
--- a/drivers/s390/net/qeth_core_sys.c
+++ b/drivers/s390/net/qeth_core_sys.c
@@ -118,7 +118,7 @@ static ssize_t qeth_dev_portno_store(struct device *dev,
118{ 118{
119 struct qeth_card *card = dev_get_drvdata(dev); 119 struct qeth_card *card = dev_get_drvdata(dev);
120 char *tmp; 120 char *tmp;
121 unsigned int portno; 121 unsigned int portno, limit;
122 122
123 if (!card) 123 if (!card)
124 return -EINVAL; 124 return -EINVAL;
@@ -128,9 +128,11 @@ static ssize_t qeth_dev_portno_store(struct device *dev,
128 return -EPERM; 128 return -EPERM;
129 129
130 portno = simple_strtoul(buf, &tmp, 16); 130 portno = simple_strtoul(buf, &tmp, 16);
131 if (portno > QETH_MAX_PORTNO) { 131 if (portno > QETH_MAX_PORTNO)
132 return -EINVAL;
133 limit = (card->ssqd.pcnt ? card->ssqd.pcnt - 1 : card->ssqd.pcnt);
134 if (portno > limit)
132 return -EINVAL; 135 return -EINVAL;
133 }
134 136
135 card->info.portno = portno; 137 card->info.portno = portno;
136 return count; 138 return count;
@@ -537,7 +539,7 @@ static ssize_t qeth_dev_blkt_total_store(struct device *dev,
537 struct qeth_card *card = dev_get_drvdata(dev); 539 struct qeth_card *card = dev_get_drvdata(dev);
538 540
539 return qeth_dev_blkt_store(card, buf, count, 541 return qeth_dev_blkt_store(card, buf, count,
540 &card->info.blkt.time_total, 1000); 542 &card->info.blkt.time_total, 5000);
541} 543}
542 544
543 545
@@ -559,7 +561,7 @@ static ssize_t qeth_dev_blkt_inter_store(struct device *dev,
559 struct qeth_card *card = dev_get_drvdata(dev); 561 struct qeth_card *card = dev_get_drvdata(dev);
560 562
561 return qeth_dev_blkt_store(card, buf, count, 563 return qeth_dev_blkt_store(card, buf, count,
562 &card->info.blkt.inter_packet, 100); 564 &card->info.blkt.inter_packet, 1000);
563} 565}
564 566
565static DEVICE_ATTR(inter, 0644, qeth_dev_blkt_inter_show, 567static DEVICE_ATTR(inter, 0644, qeth_dev_blkt_inter_show,
@@ -580,7 +582,7 @@ static ssize_t qeth_dev_blkt_inter_jumbo_store(struct device *dev,
580 struct qeth_card *card = dev_get_drvdata(dev); 582 struct qeth_card *card = dev_get_drvdata(dev);
581 583
582 return qeth_dev_blkt_store(card, buf, count, 584 return qeth_dev_blkt_store(card, buf, count,
583 &card->info.blkt.inter_packet_jumbo, 100); 585 &card->info.blkt.inter_packet_jumbo, 1000);
584} 586}
585 587
586static DEVICE_ATTR(inter_jumbo, 0644, qeth_dev_blkt_inter_jumbo_show, 588static DEVICE_ATTR(inter_jumbo, 0644, qeth_dev_blkt_inter_jumbo_show,
diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c
index 0b763396d5d..51fde6f2e0b 100644
--- a/drivers/s390/net/qeth_l2_main.c
+++ b/drivers/s390/net/qeth_l2_main.c
@@ -486,22 +486,14 @@ static int qeth_l2_send_setmac_cb(struct qeth_card *card,
486 case IPA_RC_L2_DUP_MAC: 486 case IPA_RC_L2_DUP_MAC:
487 case IPA_RC_L2_DUP_LAYER3_MAC: 487 case IPA_RC_L2_DUP_LAYER3_MAC:
488 dev_warn(&card->gdev->dev, 488 dev_warn(&card->gdev->dev,
489 "MAC address " 489 "MAC address %pM already exists\n",
490 "%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x " 490 card->dev->dev_addr);
491 "already exists\n",
492 card->dev->dev_addr[0], card->dev->dev_addr[1],
493 card->dev->dev_addr[2], card->dev->dev_addr[3],
494 card->dev->dev_addr[4], card->dev->dev_addr[5]);
495 break; 491 break;
496 case IPA_RC_L2_MAC_NOT_AUTH_BY_HYP: 492 case IPA_RC_L2_MAC_NOT_AUTH_BY_HYP:
497 case IPA_RC_L2_MAC_NOT_AUTH_BY_ADP: 493 case IPA_RC_L2_MAC_NOT_AUTH_BY_ADP:
498 dev_warn(&card->gdev->dev, 494 dev_warn(&card->gdev->dev,
499 "MAC address " 495 "MAC address %pM is not authorized\n",
500 "%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x " 496 card->dev->dev_addr);
501 "is not authorized\n",
502 card->dev->dev_addr[0], card->dev->dev_addr[1],
503 card->dev->dev_addr[2], card->dev->dev_addr[3],
504 card->dev->dev_addr[4], card->dev->dev_addr[5]);
505 break; 497 break;
506 default: 498 default:
507 break; 499 break;
@@ -512,12 +504,8 @@ static int qeth_l2_send_setmac_cb(struct qeth_card *card,
512 memcpy(card->dev->dev_addr, cmd->data.setdelmac.mac, 504 memcpy(card->dev->dev_addr, cmd->data.setdelmac.mac,
513 OSA_ADDR_LEN); 505 OSA_ADDR_LEN);
514 dev_info(&card->gdev->dev, 506 dev_info(&card->gdev->dev,
515 "MAC address %2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x " 507 "MAC address %pM successfully registered on device %s\n",
516 "successfully registered on device %s\n", 508 card->dev->dev_addr, card->dev->name);
517 card->dev->dev_addr[0], card->dev->dev_addr[1],
518 card->dev->dev_addr[2], card->dev->dev_addr[3],
519 card->dev->dev_addr[4], card->dev->dev_addr[5],
520 card->dev->name);
521 } 509 }
522 return 0; 510 return 0;
523} 511}
@@ -634,7 +622,7 @@ static void qeth_l2_set_multicast_list(struct net_device *dev)
634 for (dm = dev->mc_list; dm; dm = dm->next) 622 for (dm = dev->mc_list; dm; dm = dm->next)
635 qeth_l2_add_mc(card, dm->da_addr, 0); 623 qeth_l2_add_mc(card, dm->da_addr, 0);
636 624
637 list_for_each_entry(ha, &dev->uc.list, list) 625 netdev_for_each_uc_addr(ha, dev)
638 qeth_l2_add_mc(card, ha->addr, 1); 626 qeth_l2_add_mc(card, ha->addr, 1);
639 627
640 spin_unlock_bh(&card->mclock); 628 spin_unlock_bh(&card->mclock);
@@ -781,7 +769,8 @@ static void qeth_l2_qdio_input_handler(struct ccw_device *ccwdev,
781 index = i % QDIO_MAX_BUFFERS_PER_Q; 769 index = i % QDIO_MAX_BUFFERS_PER_Q;
782 buffer = &card->qdio.in_q->bufs[index]; 770 buffer = &card->qdio.in_q->bufs[index];
783 if (!(qdio_err && 771 if (!(qdio_err &&
784 qeth_check_qdio_errors(buffer->buffer, qdio_err, "qinerr"))) 772 qeth_check_qdio_errors(card, buffer->buffer, qdio_err,
773 "qinerr")))
785 qeth_l2_process_inbound_buffer(card, buffer, index); 774 qeth_l2_process_inbound_buffer(card, buffer, index);
786 /* clear buffer and give back to hardware */ 775 /* clear buffer and give back to hardware */
787 qeth_put_buffer_pool_entry(card, buffer->pool_entry); 776 qeth_put_buffer_pool_entry(card, buffer->pool_entry);
@@ -938,7 +927,6 @@ static int __qeth_l2_set_online(struct ccwgroup_device *gdev, int recovery_mode)
938 QETH_DBF_TEXT(SETUP, 2, "setonlin"); 927 QETH_DBF_TEXT(SETUP, 2, "setonlin");
939 QETH_DBF_HEX(SETUP, 2, &card, sizeof(void *)); 928 QETH_DBF_HEX(SETUP, 2, &card, sizeof(void *));
940 929
941 qeth_set_allowed_threads(card, QETH_RECOVER_THREAD, 1);
942 recover_flag = card->state; 930 recover_flag = card->state;
943 rc = qeth_core_hardsetup_card(card); 931 rc = qeth_core_hardsetup_card(card);
944 if (rc) { 932 if (rc) {
diff --git a/drivers/s390/net/qeth_l3.h b/drivers/s390/net/qeth_l3.h
index 321988fa9f7..8447d233d0b 100644
--- a/drivers/s390/net/qeth_l3.h
+++ b/drivers/s390/net/qeth_l3.h
@@ -13,6 +13,8 @@
13 13
14#include "qeth_core.h" 14#include "qeth_core.h"
15 15
16#define QETH_SNIFF_AVAIL 0x0008
17
16struct qeth_ipaddr { 18struct qeth_ipaddr {
17 struct list_head entry; 19 struct list_head entry;
18 enum qeth_ip_types type; 20 enum qeth_ip_types type;
diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
index fd1b6ed3721..5475834ab91 100644
--- a/drivers/s390/net/qeth_l3_main.c
+++ b/drivers/s390/net/qeth_l3_main.c
@@ -242,6 +242,8 @@ static int __qeth_l3_insert_ip_todo(struct qeth_card *card,
242 struct qeth_ipaddr *tmp, *t; 242 struct qeth_ipaddr *tmp, *t;
243 int found = 0; 243 int found = 0;
244 244
245 if (card->options.sniffer)
246 return 0;
245 list_for_each_entry_safe(tmp, t, card->ip_tbd_list, entry) { 247 list_for_each_entry_safe(tmp, t, card->ip_tbd_list, entry) {
246 if ((addr->type == QETH_IP_TYPE_DEL_ALL_MC) && 248 if ((addr->type == QETH_IP_TYPE_DEL_ALL_MC) &&
247 (tmp->type == QETH_IP_TYPE_DEL_ALL_MC)) 249 (tmp->type == QETH_IP_TYPE_DEL_ALL_MC))
@@ -457,6 +459,8 @@ static void qeth_l3_set_ip_addr_list(struct qeth_card *card)
457 QETH_DBF_TEXT(TRACE, 2, "sdiplist"); 459 QETH_DBF_TEXT(TRACE, 2, "sdiplist");
458 QETH_DBF_HEX(TRACE, 2, &card, sizeof(void *)); 460 QETH_DBF_HEX(TRACE, 2, &card, sizeof(void *));
459 461
462 if (card->options.sniffer)
463 return;
460 spin_lock_irqsave(&card->ip_lock, flags); 464 spin_lock_irqsave(&card->ip_lock, flags);
461 tbd_list = card->ip_tbd_list; 465 tbd_list = card->ip_tbd_list;
462 card->ip_tbd_list = kmalloc(sizeof(struct list_head), GFP_ATOMIC); 466 card->ip_tbd_list = kmalloc(sizeof(struct list_head), GFP_ATOMIC);
@@ -495,7 +499,7 @@ static void qeth_l3_set_ip_addr_list(struct qeth_card *card)
495 spin_unlock_irqrestore(&card->ip_lock, flags); 499 spin_unlock_irqrestore(&card->ip_lock, flags);
496 rc = qeth_l3_deregister_addr_entry(card, addr); 500 rc = qeth_l3_deregister_addr_entry(card, addr);
497 spin_lock_irqsave(&card->ip_lock, flags); 501 spin_lock_irqsave(&card->ip_lock, flags);
498 if (!rc || (rc == IPA_RC_PRIMARY_ALREADY_DEFINED)) 502 if (!rc || (rc == IPA_RC_IP_ADDRESS_NOT_DEFINED))
499 kfree(addr); 503 kfree(addr);
500 else 504 else
501 list_add_tail(&addr->entry, &card->ip_list); 505 list_add_tail(&addr->entry, &card->ip_list);
@@ -513,6 +517,8 @@ static void qeth_l3_clear_ip_list(struct qeth_card *card, int clean,
513 unsigned long flags; 517 unsigned long flags;
514 518
515 QETH_DBF_TEXT(TRACE, 4, "clearip"); 519 QETH_DBF_TEXT(TRACE, 4, "clearip");
520 if (recover && card->options.sniffer)
521 return;
516 spin_lock_irqsave(&card->ip_lock, flags); 522 spin_lock_irqsave(&card->ip_lock, flags);
517 /* clear todo list */ 523 /* clear todo list */
518 list_for_each_entry_safe(addr, tmp, card->ip_tbd_list, entry) { 524 list_for_each_entry_safe(addr, tmp, card->ip_tbd_list, entry) {
@@ -1674,6 +1680,76 @@ static int qeth_l3_get_unique_id(struct qeth_card *card)
1674 return rc; 1680 return rc;
1675} 1681}
1676 1682
1683static int
1684qeth_diags_trace_cb(struct qeth_card *card, struct qeth_reply *reply,
1685 unsigned long data)
1686{
1687 struct qeth_ipa_cmd *cmd;
1688 __u16 rc;
1689
1690 QETH_DBF_TEXT(SETUP, 2, "diastrcb");
1691
1692 cmd = (struct qeth_ipa_cmd *)data;
1693 rc = cmd->hdr.return_code;
1694 if (rc) {
1695 QETH_DBF_TEXT_(TRACE, 2, "dxter%x", rc);
1696 if (cmd->data.diagass.action == QETH_DIAGS_CMD_TRACE_ENABLE) {
1697 switch (rc) {
1698 case IPA_RC_HARDWARE_AUTH_ERROR:
1699 dev_warn(&card->gdev->dev, "The device is not "
1700 "authorized to run as a HiperSockets "
1701 "network traffic analyzer\n");
1702 break;
1703 case IPA_RC_TRACE_ALREADY_ACTIVE:
1704 dev_warn(&card->gdev->dev, "A HiperSockets "
1705 "network traffic analyzer is already "
1706 "active in the HiperSockets LAN\n");
1707 break;
1708 default:
1709 break;
1710 }
1711 }
1712 return 0;
1713 }
1714
1715 switch (cmd->data.diagass.action) {
1716 case QETH_DIAGS_CMD_TRACE_QUERY:
1717 break;
1718 case QETH_DIAGS_CMD_TRACE_DISABLE:
1719 card->info.promisc_mode = SET_PROMISC_MODE_OFF;
1720 dev_info(&card->gdev->dev, "The HiperSockets network traffic "
1721 "analyzer is deactivated\n");
1722 break;
1723 case QETH_DIAGS_CMD_TRACE_ENABLE:
1724 card->info.promisc_mode = SET_PROMISC_MODE_ON;
1725 dev_info(&card->gdev->dev, "The HiperSockets network traffic "
1726 "analyzer is activated\n");
1727 break;
1728 default:
1729 QETH_DBF_MESSAGE(2, "Unknown sniffer action (0x%04x) on %s\n",
1730 cmd->data.diagass.action, QETH_CARD_IFNAME(card));
1731 }
1732
1733 return 0;
1734}
1735
1736static int
1737qeth_diags_trace(struct qeth_card *card, enum qeth_diags_trace_cmds diags_cmd)
1738{
1739 struct qeth_cmd_buffer *iob;
1740 struct qeth_ipa_cmd *cmd;
1741
1742 QETH_DBF_TEXT(SETUP, 2, "diagtrac");
1743
1744 iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SET_DIAG_ASS, 0);
1745 cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
1746 cmd->data.diagass.subcmd_len = 16;
1747 cmd->data.diagass.subcmd = QETH_DIAGS_CMD_TRACE;
1748 cmd->data.diagass.type = QETH_DIAGS_TYPE_HIPERSOCKET;
1749 cmd->data.diagass.action = diags_cmd;
1750 return qeth_send_ipa_cmd(card, iob, qeth_diags_trace_cb, NULL);
1751}
1752
1677static void qeth_l3_get_mac_for_ipm(__u32 ipm, char *mac, 1753static void qeth_l3_get_mac_for_ipm(__u32 ipm, char *mac,
1678 struct net_device *dev) 1754 struct net_device *dev)
1679{ 1755{
@@ -1951,7 +2027,10 @@ static inline __u16 qeth_l3_rebuild_skb(struct qeth_card *card,
1951 case QETH_CAST_ANYCAST: 2027 case QETH_CAST_ANYCAST:
1952 case QETH_CAST_NOCAST: 2028 case QETH_CAST_NOCAST:
1953 default: 2029 default:
1954 skb->pkt_type = PACKET_HOST; 2030 if (card->options.sniffer)
2031 skb->pkt_type = PACKET_OTHERHOST;
2032 else
2033 skb->pkt_type = PACKET_HOST;
1955 memcpy(tg_addr, card->dev->dev_addr, 2034 memcpy(tg_addr, card->dev->dev_addr,
1956 card->dev->addr_len); 2035 card->dev->addr_len);
1957 } 2036 }
@@ -2007,7 +2086,6 @@ static void qeth_l3_process_inbound_buffer(struct qeth_card *card,
2007 int offset; 2086 int offset;
2008 __u16 vlan_tag = 0; 2087 __u16 vlan_tag = 0;
2009 unsigned int len; 2088 unsigned int len;
2010
2011 /* get first element of current buffer */ 2089 /* get first element of current buffer */
2012 element = (struct qdio_buffer_element *)&buf->buffer->element[0]; 2090 element = (struct qdio_buffer_element *)&buf->buffer->element[0];
2013 offset = 0; 2091 offset = 0;
@@ -2026,7 +2104,7 @@ static void qeth_l3_process_inbound_buffer(struct qeth_card *card,
2026 case QETH_HEADER_TYPE_LAYER3: 2104 case QETH_HEADER_TYPE_LAYER3:
2027 vlan_tag = qeth_l3_rebuild_skb(card, skb, hdr); 2105 vlan_tag = qeth_l3_rebuild_skb(card, skb, hdr);
2028 len = skb->len; 2106 len = skb->len;
2029 if (vlan_tag) 2107 if (vlan_tag && !card->options.sniffer)
2030 if (card->vlangrp) 2108 if (card->vlangrp)
2031 vlan_hwaccel_rx(skb, card->vlangrp, 2109 vlan_hwaccel_rx(skb, card->vlangrp,
2032 vlan_tag); 2110 vlan_tag);
@@ -2037,6 +2115,16 @@ static void qeth_l3_process_inbound_buffer(struct qeth_card *card,
2037 else 2115 else
2038 netif_rx(skb); 2116 netif_rx(skb);
2039 break; 2117 break;
2118 case QETH_HEADER_TYPE_LAYER2: /* for HiperSockets sniffer */
2119 skb->pkt_type = PACKET_HOST;
2120 skb->protocol = eth_type_trans(skb, skb->dev);
2121 if (card->options.checksum_type == NO_CHECKSUMMING)
2122 skb->ip_summed = CHECKSUM_UNNECESSARY;
2123 else
2124 skb->ip_summed = CHECKSUM_NONE;
2125 len = skb->len;
2126 netif_receive_skb(skb);
2127 break;
2040 default: 2128 default:
2041 dev_kfree_skb_any(skb); 2129 dev_kfree_skb_any(skb);
2042 QETH_DBF_TEXT(TRACE, 3, "inbunkno"); 2130 QETH_DBF_TEXT(TRACE, 3, "inbunkno");
@@ -2118,6 +2206,9 @@ static int qeth_l3_stop_card(struct qeth_card *card, int recovery_mode)
2118 QETH_DBF_HEX(SETUP, 2, &card, sizeof(void *)); 2206 QETH_DBF_HEX(SETUP, 2, &card, sizeof(void *));
2119 2207
2120 qeth_set_allowed_threads(card, 0, 1); 2208 qeth_set_allowed_threads(card, 0, 1);
2209 if (card->options.sniffer &&
2210 (card->info.promisc_mode == SET_PROMISC_MODE_ON))
2211 qeth_diags_trace(card, QETH_DIAGS_CMD_TRACE_DISABLE);
2121 if (card->read.state == CH_STATE_UP && 2212 if (card->read.state == CH_STATE_UP &&
2122 card->write.state == CH_STATE_UP && 2213 card->write.state == CH_STATE_UP &&
2123 (card->state == CARD_STATE_UP)) { 2214 (card->state == CARD_STATE_UP)) {
@@ -2162,6 +2253,36 @@ static int qeth_l3_stop_card(struct qeth_card *card, int recovery_mode)
2162 return rc; 2253 return rc;
2163} 2254}
2164 2255
2256/*
2257 * test for and Switch promiscuous mode (on or off)
2258 * either for guestlan or HiperSocket Sniffer
2259 */
2260static void
2261qeth_l3_handle_promisc_mode(struct qeth_card *card)
2262{
2263 struct net_device *dev = card->dev;
2264
2265 if (((dev->flags & IFF_PROMISC) &&
2266 (card->info.promisc_mode == SET_PROMISC_MODE_ON)) ||
2267 (!(dev->flags & IFF_PROMISC) &&
2268 (card->info.promisc_mode == SET_PROMISC_MODE_OFF)))
2269 return;
2270
2271 if (card->info.guestlan) { /* Guestlan trace */
2272 if (qeth_adp_supported(card, IPA_SETADP_SET_PROMISC_MODE))
2273 qeth_setadp_promisc_mode(card);
2274 } else if (card->options.sniffer && /* HiperSockets trace */
2275 qeth_adp_supported(card, IPA_SETADP_SET_DIAG_ASSIST)) {
2276 if (dev->flags & IFF_PROMISC) {
2277 QETH_DBF_TEXT(TRACE, 3, "+promisc");
2278 qeth_diags_trace(card, QETH_DIAGS_CMD_TRACE_ENABLE);
2279 } else {
2280 QETH_DBF_TEXT(TRACE, 3, "-promisc");
2281 qeth_diags_trace(card, QETH_DIAGS_CMD_TRACE_DISABLE);
2282 }
2283 }
2284}
2285
2165static void qeth_l3_set_multicast_list(struct net_device *dev) 2286static void qeth_l3_set_multicast_list(struct net_device *dev)
2166{ 2287{
2167 struct qeth_card *card = dev->ml_priv; 2288 struct qeth_card *card = dev->ml_priv;
@@ -2170,15 +2291,17 @@ static void qeth_l3_set_multicast_list(struct net_device *dev)
2170 if (qeth_threads_running(card, QETH_RECOVER_THREAD) && 2291 if (qeth_threads_running(card, QETH_RECOVER_THREAD) &&
2171 (card->state != CARD_STATE_UP)) 2292 (card->state != CARD_STATE_UP))
2172 return; 2293 return;
2173 qeth_l3_delete_mc_addresses(card); 2294 if (!card->options.sniffer) {
2174 qeth_l3_add_multicast_ipv4(card); 2295 qeth_l3_delete_mc_addresses(card);
2296 qeth_l3_add_multicast_ipv4(card);
2175#ifdef CONFIG_QETH_IPV6 2297#ifdef CONFIG_QETH_IPV6
2176 qeth_l3_add_multicast_ipv6(card); 2298 qeth_l3_add_multicast_ipv6(card);
2177#endif 2299#endif
2178 qeth_l3_set_ip_addr_list(card); 2300 qeth_l3_set_ip_addr_list(card);
2179 if (!qeth_adp_supported(card, IPA_SETADP_SET_PROMISC_MODE)) 2301 if (!qeth_adp_supported(card, IPA_SETADP_SET_PROMISC_MODE))
2180 return; 2302 return;
2181 qeth_setadp_promisc_mode(card); 2303 }
2304 qeth_l3_handle_promisc_mode(card);
2182} 2305}
2183 2306
2184static const char *qeth_l3_arp_get_error_cause(int *rc) 2307static const char *qeth_l3_arp_get_error_cause(int *rc)
@@ -2778,8 +2901,9 @@ static int qeth_l3_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
2778 int nr_frags; 2901 int nr_frags;
2779 2902
2780 if ((card->info.type == QETH_CARD_TYPE_IQD) && 2903 if ((card->info.type == QETH_CARD_TYPE_IQD) &&
2781 (skb->protocol != htons(ETH_P_IPV6)) && 2904 (((skb->protocol != htons(ETH_P_IPV6)) &&
2782 (skb->protocol != htons(ETH_P_IP))) 2905 (skb->protocol != htons(ETH_P_IP))) ||
2906 card->options.sniffer))
2783 goto tx_drop; 2907 goto tx_drop;
2784 2908
2785 if ((card->state != CARD_STATE_UP) || !card->lan_online) { 2909 if ((card->state != CARD_STATE_UP) || !card->lan_online) {
@@ -3155,7 +3279,7 @@ static void qeth_l3_qdio_input_handler(struct ccw_device *ccwdev,
3155 index = i % QDIO_MAX_BUFFERS_PER_Q; 3279 index = i % QDIO_MAX_BUFFERS_PER_Q;
3156 buffer = &card->qdio.in_q->bufs[index]; 3280 buffer = &card->qdio.in_q->bufs[index];
3157 if (!(qdio_err && 3281 if (!(qdio_err &&
3158 qeth_check_qdio_errors(buffer->buffer, 3282 qeth_check_qdio_errors(card, buffer->buffer,
3159 qdio_err, "qinerr"))) 3283 qdio_err, "qinerr")))
3160 qeth_l3_process_inbound_buffer(card, buffer, index); 3284 qeth_l3_process_inbound_buffer(card, buffer, index);
3161 /* clear buffer and give back to hardware */ 3285 /* clear buffer and give back to hardware */
@@ -3214,8 +3338,6 @@ static int __qeth_l3_set_online(struct ccwgroup_device *gdev, int recovery_mode)
3214 QETH_DBF_TEXT(SETUP, 2, "setonlin"); 3338 QETH_DBF_TEXT(SETUP, 2, "setonlin");
3215 QETH_DBF_HEX(SETUP, 2, &card, sizeof(void *)); 3339 QETH_DBF_HEX(SETUP, 2, &card, sizeof(void *));
3216 3340
3217 qeth_set_allowed_threads(card, QETH_RECOVER_THREAD, 1);
3218
3219 recover_flag = card->state; 3341 recover_flag = card->state;
3220 rc = qeth_core_hardsetup_card(card); 3342 rc = qeth_core_hardsetup_card(card);
3221 if (rc) { 3343 if (rc) {
@@ -3250,20 +3372,22 @@ static int __qeth_l3_set_online(struct ccwgroup_device *gdev, int recovery_mode)
3250 goto out_remove; 3372 goto out_remove;
3251 } else 3373 } else
3252 card->lan_online = 1; 3374 card->lan_online = 1;
3253 qeth_l3_set_large_send(card, card->options.large_send);
3254 3375
3255 rc = qeth_l3_setadapter_parms(card); 3376 rc = qeth_l3_setadapter_parms(card);
3256 if (rc) 3377 if (rc)
3257 QETH_DBF_TEXT_(SETUP, 2, "2err%d", rc); 3378 QETH_DBF_TEXT_(SETUP, 2, "2err%d", rc);
3258 rc = qeth_l3_start_ipassists(card); 3379 if (!card->options.sniffer) {
3259 if (rc) 3380 rc = qeth_l3_start_ipassists(card);
3260 QETH_DBF_TEXT_(SETUP, 2, "3err%d", rc); 3381 if (rc)
3261 rc = qeth_l3_setrouting_v4(card); 3382 QETH_DBF_TEXT_(SETUP, 2, "3err%d", rc);
3262 if (rc) 3383 qeth_l3_set_large_send(card, card->options.large_send);
3263 QETH_DBF_TEXT_(SETUP, 2, "4err%d", rc); 3384 rc = qeth_l3_setrouting_v4(card);
3264 rc = qeth_l3_setrouting_v6(card); 3385 if (rc)
3265 if (rc) 3386 QETH_DBF_TEXT_(SETUP, 2, "4err%d", rc);
3266 QETH_DBF_TEXT_(SETUP, 2, "5err%d", rc); 3387 rc = qeth_l3_setrouting_v6(card);
3388 if (rc)
3389 QETH_DBF_TEXT_(SETUP, 2, "5err%d", rc);
3390 }
3267 netif_tx_disable(card->dev); 3391 netif_tx_disable(card->dev);
3268 3392
3269 rc = qeth_init_qdio_queues(card); 3393 rc = qeth_init_qdio_queues(card);
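
The qeth_l3 hunks above tie the new HiperSockets sniffer to the interface's promiscuous flag: qeth_l3_set_multicast_list() now calls qeth_l3_handle_promisc_mode(), which enables or disables the DIAGS trace whenever IFF_PROMISC changes while options.sniffer is set. A minimal userspace sketch of that trigger, using only the standard SIOCGIFFLAGS/SIOCSIFFLAGS ioctls (the interface name is just an example, not part of the patch):

/* Sketch: toggling IFF_PROMISC on a qeth L3 interface; with the
 * sniffer option enabled this drives qeth_diags_trace() through
 * qeth_l3_handle_promisc_mode(). */
#include <net/if.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <unistd.h>

static int set_promisc(const char *ifname, int on)
{
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0)
		return -1;
	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
	if (ioctl(fd, SIOCGIFFLAGS, &ifr) < 0)
		goto err;
	if (on)
		ifr.ifr_flags |= IFF_PROMISC;	/* trace enable */
	else
		ifr.ifr_flags &= ~IFF_PROMISC;	/* trace disable */
	if (ioctl(fd, SIOCSIFFLAGS, &ifr) < 0)
		goto err;
	close(fd);
	return 0;
err:
	close(fd);
	return -1;
}
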
diff --git a/drivers/s390/net/qeth_l3_sys.c b/drivers/s390/net/qeth_l3_sys.c
index 3360b0941aa..3f08b11274a 100644
--- a/drivers/s390/net/qeth_l3_sys.c
+++ b/drivers/s390/net/qeth_l3_sys.c
@@ -319,6 +319,61 @@ static ssize_t qeth_l3_dev_checksum_store(struct device *dev,
319static DEVICE_ATTR(checksumming, 0644, qeth_l3_dev_checksum_show, 319static DEVICE_ATTR(checksumming, 0644, qeth_l3_dev_checksum_show,
320 qeth_l3_dev_checksum_store); 320 qeth_l3_dev_checksum_store);
321 321
322static ssize_t qeth_l3_dev_sniffer_show(struct device *dev,
323 struct device_attribute *attr, char *buf)
324{
325 struct qeth_card *card = dev_get_drvdata(dev);
326
327 if (!card)
328 return -EINVAL;
329
330 return sprintf(buf, "%i\n", card->options.sniffer ? 1 : 0);
331}
332
333static ssize_t qeth_l3_dev_sniffer_store(struct device *dev,
334 struct device_attribute *attr, const char *buf, size_t count)
335{
336 struct qeth_card *card = dev_get_drvdata(dev);
337 int ret;
338 unsigned long i;
339
340 if (!card)
341 return -EINVAL;
342
343 if (card->info.type != QETH_CARD_TYPE_IQD)
344 return -EPERM;
345
346 if ((card->state != CARD_STATE_DOWN) &&
347 (card->state != CARD_STATE_RECOVER))
348 return -EPERM;
349
350 ret = strict_strtoul(buf, 16, &i);
351 if (ret)
352 return -EINVAL;
353 switch (i) {
354 case 0:
355 card->options.sniffer = i;
356 break;
357 case 1:
358 ret = qdio_get_ssqd_desc(CARD_DDEV(card), &card->ssqd);
359 if (card->ssqd.qdioac2 & QETH_SNIFF_AVAIL) {
360 card->options.sniffer = i;
361 if (card->qdio.init_pool.buf_count !=
362 QETH_IN_BUF_COUNT_MAX)
363 qeth_realloc_buffer_pool(card,
364 QETH_IN_BUF_COUNT_MAX);
365 break;
366 } else
367 return -EPERM;
368 default: /* fall through */
369 return -EINVAL;
370 }
371 return count;
372}
373
374static DEVICE_ATTR(sniffer, 0644, qeth_l3_dev_sniffer_show,
375 qeth_l3_dev_sniffer_store);
376
322static ssize_t qeth_l3_dev_large_send_show(struct device *dev, 377static ssize_t qeth_l3_dev_large_send_show(struct device *dev,
323 struct device_attribute *attr, char *buf) 378 struct device_attribute *attr, char *buf)
324{ 379{
@@ -373,6 +428,7 @@ static struct attribute *qeth_l3_device_attrs[] = {
373 &dev_attr_broadcast_mode.attr, 428 &dev_attr_broadcast_mode.attr,
374 &dev_attr_canonical_macaddr.attr, 429 &dev_attr_canonical_macaddr.attr,
375 &dev_attr_checksumming.attr, 430 &dev_attr_checksumming.attr,
431 &dev_attr_sniffer.attr,
376 &dev_attr_large_send.attr, 432 &dev_attr_large_send.attr,
377 NULL, 433 NULL,
378}; 434};
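
The sniffer itself is switched on through the new "sniffer" sysfs attribute added above: the store routine accepts only 0 or 1, only on IQD (HiperSockets) devices whose SSQD data advertises QETH_SNIFF_AVAIL, and only while the device is offline (DOWN or RECOVER state). A hedged sketch of flipping the attribute from user space; the ccwgroup bus ID in the path is an assumed example, not taken from the patch:

/* Sketch: enabling the new "sniffer" attribute from user space.
 * The bus ID 0.0.f5f0 and the sysfs path layout are examples only. */
#include <fcntl.h>
#include <unistd.h>

static int enable_sniffer(void)
{
	const char *path =
		"/sys/bus/ccwgroup/devices/0.0.f5f0/sniffer"; /* example ID */
	int fd = open(path, O_WRONLY);
	ssize_t n;

	if (fd < 0)
		return -1;
	n = write(fd, "1", 1);	/* store routine expects 0 or 1 */
	close(fd);
	return n == 1 ? 0 : -1;
}
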
diff --git a/drivers/staging/rtl8187se/ieee80211/ieee80211.h b/drivers/staging/rtl8187se/ieee80211/ieee80211.h
index 0d490c164db..9086047c32d 100644
--- a/drivers/staging/rtl8187se/ieee80211/ieee80211.h
+++ b/drivers/staging/rtl8187se/ieee80211/ieee80211.h
@@ -482,15 +482,6 @@ struct ieee80211_header_data {
482 u16 seq_ctrl; 482 u16 seq_ctrl;
483}; 483};
484 484
485struct ieee80211_hdr_3addr {
486 u16 frame_ctl;
487 u16 duration_id;
488 u8 addr1[ETH_ALEN];
489 u8 addr2[ETH_ALEN];
490 u8 addr3[ETH_ALEN];
491 u16 seq_ctl;
492} __attribute__ ((packed));
493
494struct ieee80211_hdr_4addr { 485struct ieee80211_hdr_4addr {
495 u16 frame_ctl; 486 u16 frame_ctl;
496 u16 duration_id; 487 u16 duration_id;
diff --git a/drivers/staging/rtl8187se/ieee80211/ieee80211_softmac.c b/drivers/staging/rtl8187se/ieee80211/ieee80211_softmac.c
index c7c645af0eb..a2150670ef5 100644
--- a/drivers/staging/rtl8187se/ieee80211/ieee80211_softmac.c
+++ b/drivers/staging/rtl8187se/ieee80211/ieee80211_softmac.c
@@ -203,7 +203,7 @@ inline void softmac_mgmt_xmit(struct sk_buff *skb, struct ieee80211_device *ieee
203 203
204 enqueue_mgmt(ieee,skb); 204 enqueue_mgmt(ieee,skb);
205 }else{ 205 }else{
206 header->seq_ctl = cpu_to_le16(ieee->seq_ctrl[0]<<4); 206 header->seq_ctrl = cpu_to_le16(ieee->seq_ctrl[0]<<4);
207 207
208 if (ieee->seq_ctrl[0] == 0xFFF) 208 if (ieee->seq_ctrl[0] == 0xFFF)
209 ieee->seq_ctrl[0] = 0; 209 ieee->seq_ctrl[0] = 0;
@@ -220,7 +220,7 @@ inline void softmac_mgmt_xmit(struct sk_buff *skb, struct ieee80211_device *ieee
220 spin_unlock_irqrestore(&ieee->lock, flags); 220 spin_unlock_irqrestore(&ieee->lock, flags);
221 spin_lock_irqsave(&ieee->mgmt_tx_lock, flags); 221 spin_lock_irqsave(&ieee->mgmt_tx_lock, flags);
222 222
223 header->seq_ctl = cpu_to_le16(ieee->seq_ctrl[0] << 4); 223 header->seq_ctrl = cpu_to_le16(ieee->seq_ctrl[0] << 4);
224 224
225 if (ieee->seq_ctrl[0] == 0xFFF) 225 if (ieee->seq_ctrl[0] == 0xFFF)
226 ieee->seq_ctrl[0] = 0; 226 ieee->seq_ctrl[0] = 0;
@@ -246,7 +246,7 @@ inline void softmac_ps_mgmt_xmit(struct sk_buff *skb, struct ieee80211_device *i
246 246
247 if(single){ 247 if(single){
248 248
249 header->seq_ctl = cpu_to_le16(ieee->seq_ctrl[0] << 4); 249 header->seq_ctrl = cpu_to_le16(ieee->seq_ctrl[0] << 4);
250 250
251 if (ieee->seq_ctrl[0] == 0xFFF) 251 if (ieee->seq_ctrl[0] == 0xFFF)
252 ieee->seq_ctrl[0] = 0; 252 ieee->seq_ctrl[0] = 0;
@@ -259,7 +259,7 @@ inline void softmac_ps_mgmt_xmit(struct sk_buff *skb, struct ieee80211_device *i
259 259
260 }else{ 260 }else{
261 261
262 header->seq_ctl = cpu_to_le16(ieee->seq_ctrl[0] << 4); 262 header->seq_ctrl = cpu_to_le16(ieee->seq_ctrl[0] << 4);
263 263
264 if (ieee->seq_ctrl[0] == 0xFFF) 264 if (ieee->seq_ctrl[0] == 0xFFF)
265 ieee->seq_ctrl[0] = 0; 265 ieee->seq_ctrl[0] = 0;
@@ -287,7 +287,7 @@ inline struct sk_buff *ieee80211_disassociate_skb(
287 return NULL; 287 return NULL;
288 288
289 disass = (struct ieee80211_disassoc_frame *) skb_put(skb,sizeof(struct ieee80211_disassoc_frame)); 289 disass = (struct ieee80211_disassoc_frame *) skb_put(skb,sizeof(struct ieee80211_disassoc_frame));
290 disass->header.frame_ctl = cpu_to_le16(IEEE80211_STYPE_DISASSOC); 290 disass->header.frame_control = cpu_to_le16(IEEE80211_STYPE_DISASSOC);
291 disass->header.duration_id = 0; 291 disass->header.duration_id = 0;
292 292
293 memcpy(disass->header.addr1, beacon->bssid, ETH_ALEN); 293 memcpy(disass->header.addr1, beacon->bssid, ETH_ALEN);
@@ -905,7 +905,7 @@ struct sk_buff* ieee80211_assoc_resp(struct ieee80211_device *ieee, u8 *dest)
905 assoc = (struct ieee80211_assoc_response_frame *) 905 assoc = (struct ieee80211_assoc_response_frame *)
906 skb_put(skb,sizeof(struct ieee80211_assoc_response_frame)); 906 skb_put(skb,sizeof(struct ieee80211_assoc_response_frame));
907 907
908 assoc->header.frame_ctl = cpu_to_le16(IEEE80211_STYPE_ASSOC_RESP); 908 assoc->header.frame_control = cpu_to_le16(IEEE80211_STYPE_ASSOC_RESP);
909 memcpy(assoc->header.addr1, dest,ETH_ALEN); 909 memcpy(assoc->header.addr1, dest,ETH_ALEN);
910 memcpy(assoc->header.addr3, ieee->dev->dev_addr, ETH_ALEN); 910 memcpy(assoc->header.addr3, ieee->dev->dev_addr, ETH_ALEN);
911 memcpy(assoc->header.addr2, ieee->dev->dev_addr, ETH_ALEN); 911 memcpy(assoc->header.addr2, ieee->dev->dev_addr, ETH_ALEN);
@@ -981,7 +981,7 @@ struct sk_buff* ieee80211_null_func(struct ieee80211_device *ieee,short pwr)
981 memcpy(hdr->addr2, ieee->dev->dev_addr, ETH_ALEN); 981 memcpy(hdr->addr2, ieee->dev->dev_addr, ETH_ALEN);
982 memcpy(hdr->addr3, ieee->current_network.bssid, ETH_ALEN); 982 memcpy(hdr->addr3, ieee->current_network.bssid, ETH_ALEN);
983 983
984 hdr->frame_ctl = cpu_to_le16(IEEE80211_FTYPE_DATA | 984 hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_DATA |
985 IEEE80211_STYPE_NULLFUNC | IEEE80211_FCTL_TODS | 985 IEEE80211_STYPE_NULLFUNC | IEEE80211_FCTL_TODS |
986 (pwr ? IEEE80211_FCTL_PM:0)); 986 (pwr ? IEEE80211_FCTL_PM:0));
987 987
@@ -1084,7 +1084,7 @@ inline struct sk_buff *ieee80211_association_req(struct ieee80211_network *beaco
1084 skb_put(skb, sizeof(struct ieee80211_assoc_request_frame)); 1084 skb_put(skb, sizeof(struct ieee80211_assoc_request_frame));
1085 1085
1086 1086
1087 hdr->header.frame_ctl = IEEE80211_STYPE_ASSOC_REQ; 1087 hdr->header.frame_control = IEEE80211_STYPE_ASSOC_REQ;
1088 hdr->header.duration_id= 37; //FIXME 1088 hdr->header.duration_id= 37; //FIXME
1089 memcpy(hdr->header.addr1, beacon->bssid, ETH_ALEN); 1089 memcpy(hdr->header.addr1, beacon->bssid, ETH_ALEN);
1090 memcpy(hdr->header.addr2, ieee->dev->dev_addr, ETH_ALEN); 1090 memcpy(hdr->header.addr2, ieee->dev->dev_addr, ETH_ALEN);
@@ -1786,11 +1786,11 @@ ieee80211_rx_frame_softmac(struct ieee80211_device *ieee, struct sk_buff *skb,
1786 1786
1787 tasklet_schedule(&ieee->ps_task); 1787 tasklet_schedule(&ieee->ps_task);
1788 1788
1789 if(WLAN_FC_GET_STYPE(header->frame_ctl) != IEEE80211_STYPE_PROBE_RESP && 1789 if (WLAN_FC_GET_STYPE(header->frame_control) != IEEE80211_STYPE_PROBE_RESP &&
1790 WLAN_FC_GET_STYPE(header->frame_ctl) != IEEE80211_STYPE_BEACON) 1790 WLAN_FC_GET_STYPE(header->frame_control) != IEEE80211_STYPE_BEACON)
1791 ieee->last_rx_ps_time = jiffies; 1791 ieee->last_rx_ps_time = jiffies;
1792 1792
1793 switch (WLAN_FC_GET_STYPE(header->frame_ctl)) { 1793 switch (WLAN_FC_GET_STYPE(header->frame_control)) {
1794 1794
1795 case IEEE80211_STYPE_ASSOC_RESP: 1795 case IEEE80211_STYPE_ASSOC_RESP:
1796 case IEEE80211_STYPE_REASSOC_RESP: 1796 case IEEE80211_STYPE_REASSOC_RESP:
@@ -2064,7 +2064,7 @@ void ieee80211_rtl_wake_queue(struct ieee80211_device *ieee)
2064 2064
2065 header = (struct ieee80211_hdr_3addr *) skb->data; 2065 header = (struct ieee80211_hdr_3addr *) skb->data;
2066 2066
2067 header->seq_ctl = cpu_to_le16(ieee->seq_ctrl[0] << 4); 2067 header->seq_ctrl = cpu_to_le16(ieee->seq_ctrl[0] << 4);
2068 2068
2069 if (ieee->seq_ctrl[0] == 0xFFF) 2069 if (ieee->seq_ctrl[0] == 0xFFF)
2070 ieee->seq_ctrl[0] = 0; 2070 ieee->seq_ctrl[0] = 0;
diff --git a/drivers/staging/rtl8187se/r8180_core.c b/drivers/staging/rtl8187se/r8180_core.c
index e0f13efdb15..1847f38b9f2 100644
--- a/drivers/staging/rtl8187se/r8180_core.c
+++ b/drivers/staging/rtl8187se/r8180_core.c
@@ -1890,7 +1890,7 @@ rate)
1890 struct r8180_priv *priv = (struct r8180_priv *)ieee80211_priv(dev); 1890 struct r8180_priv *priv = (struct r8180_priv *)ieee80211_priv(dev);
1891 int mode; 1891 int mode;
1892 struct ieee80211_hdr_3addr *h = (struct ieee80211_hdr_3addr *) skb->data; 1892 struct ieee80211_hdr_3addr *h = (struct ieee80211_hdr_3addr *) skb->data;
1893 short morefrag = (h->frame_ctl) & IEEE80211_FCTL_MOREFRAGS; 1893 short morefrag = (h->frame_control) & IEEE80211_FCTL_MOREFRAGS;
1894 unsigned long flags; 1894 unsigned long flags;
1895 int priority; 1895 int priority;
1896 1896
@@ -2158,7 +2158,7 @@ short rtl8180_tx(struct net_device *dev, u8* txbuf, int len, int priority,
2158 TxDescDuration = ThisFrameTime + aSifsTime + AckTime; 2158 TxDescDuration = ThisFrameTime + aSifsTime + AckTime;
2159 } 2159 }
2160 2160
2161 if(!(frag_hdr->frame_ctl & IEEE80211_FCTL_MOREFRAGS)) { //no more fragment 2161 if (!(frag_hdr->frame_control & IEEE80211_FCTL_MOREFRAGS)) {
2162 // ThisFrame-ACK. 2162 // ThisFrame-ACK.
2163 Duration = aSifsTime + AckTime; 2163 Duration = aSifsTime + AckTime;
2164 } else { // One or more fragments remained. 2164 } else { // One or more fragments remained.
diff --git a/drivers/staging/wlags49_h2/wl_netdev.c b/drivers/staging/wlags49_h2/wl_netdev.c
index ac389024796..0d22e3692fe 100644
--- a/drivers/staging/wlags49_h2/wl_netdev.c
+++ b/drivers/staging/wlags49_h2/wl_netdev.c
@@ -1194,9 +1194,7 @@ static const struct net_device_ops wl_netdev_ops =
1194 .ndo_stop = &wl_adapter_close, 1194 .ndo_stop = &wl_adapter_close,
1195 .ndo_do_ioctl = &wl_ioctl, 1195 .ndo_do_ioctl = &wl_ioctl,
1196 1196
1197#ifdef HAVE_TX_TIMEOUT
1198 .ndo_tx_timeout = &wl_tx_timeout, 1197 .ndo_tx_timeout = &wl_tx_timeout,
1199#endif
1200 1198
1201#ifdef CONFIG_NET_POLL_CONTROLLER 1199#ifdef CONFIG_NET_POLL_CONTROLLER
1202 .ndo_poll_controller = wl_poll, 1200 .ndo_poll_controller = wl_poll,
@@ -1270,9 +1268,7 @@ struct net_device * wl_device_alloc( void )
1270 dev->stop = &wl_adapter_close; 1268 dev->stop = &wl_adapter_close;
1271 dev->do_ioctl = &wl_ioctl; 1269 dev->do_ioctl = &wl_ioctl;
1272 1270
1273#ifdef HAVE_TX_TIMEOUT
1274 dev->tx_timeout = &wl_tx_timeout; 1271 dev->tx_timeout = &wl_tx_timeout;
1275#endif
1276 1272
1277#ifdef CONFIG_NET_POLL_CONTROLLER 1273#ifdef CONFIG_NET_POLL_CONTROLLER
1278 dev->poll_controller = wl_poll; 1274 dev->poll_controller = wl_poll;
@@ -1280,9 +1276,7 @@ struct net_device * wl_device_alloc( void )
1280 1276
1281#endif // (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,30)) 1277#endif // (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,30))
1282 1278
1283#ifdef HAVE_TX_TIMEOUT
1284 dev->watchdog_timeo = TX_TIMEOUT; 1279 dev->watchdog_timeo = TX_TIMEOUT;
1285#endif
1286 1280
1287 dev->ethtool_ops = &wl_ethtool_ops; 1281 dev->ethtool_ops = &wl_ethtool_ops;
1288 1282
diff --git a/drivers/vhost/Kconfig b/drivers/vhost/Kconfig
new file mode 100644
index 00000000000..9e9355367bb
--- /dev/null
+++ b/drivers/vhost/Kconfig
@@ -0,0 +1,11 @@
1config VHOST_NET
2 tristate "Host kernel accelerator for virtio net (EXPERIMENTAL)"
3 depends on NET && EVENTFD && (TUN || !TUN) && EXPERIMENTAL
4 ---help---
5 This kernel module can be loaded in host kernel to accelerate
6 guest networking with virtio_net. Not to be confused with virtio_net
7 module itself which needs to be loaded in guest kernel.
8
9 To compile this driver as a module, choose M here: the module will
10 be called vhost_net.
11
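
The Kconfig text above describes the host side only; user space still has to open the character device and negotiate features before handing over a backend. A minimal sketch, assuming the misc device registered below under the name "vhost-net" shows up as /dev/vhost-net, and using only ioctls defined in <linux/vhost.h> (VHOST_SET_OWNER is handled by the generic vhost_dev_ioctl() path, which is not part of this file):

/* Sketch: open the vhost-net device, claim ownership and ack features. */
#include <fcntl.h>
#include <stdint.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/vhost.h>

static int vhost_open_and_negotiate(void)
{
	uint64_t features;
	int vhost = open("/dev/vhost-net", O_RDWR);

	if (vhost < 0)
		return -1;
	if (ioctl(vhost, VHOST_SET_OWNER) < 0)
		goto err;
	if (ioctl(vhost, VHOST_GET_FEATURES, &features) < 0)
		goto err;
	/* mask out anything we do not understand, then acknowledge */
	if (ioctl(vhost, VHOST_SET_FEATURES, &features) < 0)
		goto err;
	return vhost;
err:
	close(vhost);
	return -1;
}
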
diff --git a/drivers/vhost/Makefile b/drivers/vhost/Makefile
new file mode 100644
index 00000000000..72dd02050bb
--- /dev/null
+++ b/drivers/vhost/Makefile
@@ -0,0 +1,2 @@
1obj-$(CONFIG_VHOST_NET) += vhost_net.o
2vhost_net-y := vhost.o net.o
diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
new file mode 100644
index 00000000000..4c8928319e1
--- /dev/null
+++ b/drivers/vhost/net.c
@@ -0,0 +1,661 @@
1/* Copyright (C) 2009 Red Hat, Inc.
2 * Author: Michael S. Tsirkin <mst@redhat.com>
3 *
4 * This work is licensed under the terms of the GNU GPL, version 2.
5 *
6 * virtio-net server in host kernel.
7 */
8
9#include <linux/compat.h>
10#include <linux/eventfd.h>
11#include <linux/vhost.h>
12#include <linux/virtio_net.h>
13#include <linux/mmu_context.h>
14#include <linux/miscdevice.h>
15#include <linux/module.h>
16#include <linux/mutex.h>
17#include <linux/workqueue.h>
18#include <linux/rcupdate.h>
19#include <linux/file.h>
20
21#include <linux/net.h>
22#include <linux/if_packet.h>
23#include <linux/if_arp.h>
24#include <linux/if_tun.h>
25
26#include <net/sock.h>
27
28#include "vhost.h"
29
30/* Max number of bytes transferred before requeueing the job.
31 * Using this limit prevents one virtqueue from starving others. */
32#define VHOST_NET_WEIGHT 0x80000
33
34enum {
35 VHOST_NET_VQ_RX = 0,
36 VHOST_NET_VQ_TX = 1,
37 VHOST_NET_VQ_MAX = 2,
38};
39
40enum vhost_net_poll_state {
41 VHOST_NET_POLL_DISABLED = 0,
42 VHOST_NET_POLL_STARTED = 1,
43 VHOST_NET_POLL_STOPPED = 2,
44};
45
46struct vhost_net {
47 struct vhost_dev dev;
48 struct vhost_virtqueue vqs[VHOST_NET_VQ_MAX];
49 struct vhost_poll poll[VHOST_NET_VQ_MAX];
50 /* Tells us whether we are polling a socket for TX.
51 * We only do this when socket buffer fills up.
52 * Protected by tx vq lock. */
53 enum vhost_net_poll_state tx_poll_state;
54};
55
56/* Pop first len bytes from iovec. Return number of segments used. */
57static int move_iovec_hdr(struct iovec *from, struct iovec *to,
58 size_t len, int iov_count)
59{
60 int seg = 0;
61 size_t size;
62 while (len && seg < iov_count) {
63 size = min(from->iov_len, len);
64 to->iov_base = from->iov_base;
65 to->iov_len = size;
66 from->iov_len -= size;
67 from->iov_base += size;
68 len -= size;
69 ++from;
70 ++to;
71 ++seg;
72 }
73 return seg;
74}
75
76/* Caller must have TX VQ lock */
77static void tx_poll_stop(struct vhost_net *net)
78{
79 if (likely(net->tx_poll_state != VHOST_NET_POLL_STARTED))
80 return;
81 vhost_poll_stop(net->poll + VHOST_NET_VQ_TX);
82 net->tx_poll_state = VHOST_NET_POLL_STOPPED;
83}
84
85/* Caller must have TX VQ lock */
86static void tx_poll_start(struct vhost_net *net, struct socket *sock)
87{
88 if (unlikely(net->tx_poll_state != VHOST_NET_POLL_STOPPED))
89 return;
90 vhost_poll_start(net->poll + VHOST_NET_VQ_TX, sock->file);
91 net->tx_poll_state = VHOST_NET_POLL_STARTED;
92}
93
94/* Expects to be always run from workqueue - which acts as
95 * read-size critical section for our kind of RCU. */
96static void handle_tx(struct vhost_net *net)
97{
98 struct vhost_virtqueue *vq = &net->dev.vqs[VHOST_NET_VQ_TX];
99 unsigned head, out, in, s;
100 struct msghdr msg = {
101 .msg_name = NULL,
102 .msg_namelen = 0,
103 .msg_control = NULL,
104 .msg_controllen = 0,
105 .msg_iov = vq->iov,
106 .msg_flags = MSG_DONTWAIT,
107 };
108 size_t len, total_len = 0;
109 int err, wmem;
110 size_t hdr_size;
111 struct socket *sock = rcu_dereference(vq->private_data);
112 if (!sock)
113 return;
114
115 wmem = atomic_read(&sock->sk->sk_wmem_alloc);
116 if (wmem >= sock->sk->sk_sndbuf)
117 return;
118
119 use_mm(net->dev.mm);
120 mutex_lock(&vq->mutex);
121 vhost_disable_notify(vq);
122
123 if (wmem < sock->sk->sk_sndbuf * 2)
124 tx_poll_stop(net);
125 hdr_size = vq->hdr_size;
126
127 for (;;) {
128 head = vhost_get_vq_desc(&net->dev, vq, vq->iov,
129 ARRAY_SIZE(vq->iov),
130 &out, &in,
131 NULL, NULL);
132 /* Nothing new? Wait for eventfd to tell us they refilled. */
133 if (head == vq->num) {
134 wmem = atomic_read(&sock->sk->sk_wmem_alloc);
135 if (wmem >= sock->sk->sk_sndbuf * 3 / 4) {
136 tx_poll_start(net, sock);
137 set_bit(SOCK_ASYNC_NOSPACE, &sock->flags);
138 break;
139 }
140 if (unlikely(vhost_enable_notify(vq))) {
141 vhost_disable_notify(vq);
142 continue;
143 }
144 break;
145 }
146 if (in) {
147 vq_err(vq, "Unexpected descriptor format for TX: "
148 "out %d, int %d\n", out, in);
149 break;
150 }
151 /* Skip header. TODO: support TSO. */
152 s = move_iovec_hdr(vq->iov, vq->hdr, hdr_size, out);
153 msg.msg_iovlen = out;
154 len = iov_length(vq->iov, out);
155 /* Sanity check */
156 if (!len) {
157 vq_err(vq, "Unexpected header len for TX: "
158 "%zd expected %zd\n",
159 iov_length(vq->hdr, s), hdr_size);
160 break;
161 }
162 /* TODO: Check specific error and bomb out unless ENOBUFS? */
163 err = sock->ops->sendmsg(NULL, sock, &msg, len);
164 if (unlikely(err < 0)) {
165 vhost_discard_vq_desc(vq);
166 tx_poll_start(net, sock);
167 break;
168 }
169 if (err != len)
170 pr_err("Truncated TX packet: "
171 " len %d != %zd\n", err, len);
172 vhost_add_used_and_signal(&net->dev, vq, head, 0);
173 total_len += len;
174 if (unlikely(total_len >= VHOST_NET_WEIGHT)) {
175 vhost_poll_queue(&vq->poll);
176 break;
177 }
178 }
179
180 mutex_unlock(&vq->mutex);
181 unuse_mm(net->dev.mm);
182}
183
184/* Expects to be always run from workqueue - which acts as
185 * read-size critical section for our kind of RCU. */
186static void handle_rx(struct vhost_net *net)
187{
188 struct vhost_virtqueue *vq = &net->dev.vqs[VHOST_NET_VQ_RX];
189 unsigned head, out, in, log, s;
190 struct vhost_log *vq_log;
191 struct msghdr msg = {
192 .msg_name = NULL,
193 .msg_namelen = 0,
194 .msg_control = NULL, /* FIXME: get and handle RX aux data. */
195 .msg_controllen = 0,
196 .msg_iov = vq->iov,
197 .msg_flags = MSG_DONTWAIT,
198 };
199
200 struct virtio_net_hdr hdr = {
201 .flags = 0,
202 .gso_type = VIRTIO_NET_HDR_GSO_NONE
203 };
204
205 size_t len, total_len = 0;
206 int err;
207 size_t hdr_size;
208 struct socket *sock = rcu_dereference(vq->private_data);
209 if (!sock || skb_queue_empty(&sock->sk->sk_receive_queue))
210 return;
211
212 use_mm(net->dev.mm);
213 mutex_lock(&vq->mutex);
214 vhost_disable_notify(vq);
215 hdr_size = vq->hdr_size;
216
217 vq_log = unlikely(vhost_has_feature(&net->dev, VHOST_F_LOG_ALL)) ?
218 vq->log : NULL;
219
220 for (;;) {
221 head = vhost_get_vq_desc(&net->dev, vq, vq->iov,
222 ARRAY_SIZE(vq->iov),
223 &out, &in,
224 vq_log, &log);
225 /* OK, now we need to know about added descriptors. */
226 if (head == vq->num) {
227 if (unlikely(vhost_enable_notify(vq))) {
228 /* They have slipped one in as we were
229 * doing that: check again. */
230 vhost_disable_notify(vq);
231 continue;
232 }
233 /* Nothing new? Wait for eventfd to tell us
234 * they refilled. */
235 break;
236 }
237 /* We don't need to be notified again. */
238 if (out) {
239 vq_err(vq, "Unexpected descriptor format for RX: "
240 "out %d, int %d\n",
241 out, in);
242 break;
243 }
244 /* Skip header. TODO: support TSO/mergeable rx buffers. */
245 s = move_iovec_hdr(vq->iov, vq->hdr, hdr_size, in);
246 msg.msg_iovlen = in;
247 len = iov_length(vq->iov, in);
248 /* Sanity check */
249 if (!len) {
250 vq_err(vq, "Unexpected header len for RX: "
251 "%zd expected %zd\n",
252 iov_length(vq->hdr, s), hdr_size);
253 break;
254 }
255 err = sock->ops->recvmsg(NULL, sock, &msg,
256 len, MSG_DONTWAIT | MSG_TRUNC);
257 /* TODO: Check specific error and bomb out unless EAGAIN? */
258 if (err < 0) {
259 vhost_discard_vq_desc(vq);
260 break;
261 }
262 /* TODO: Should check and handle checksum. */
263 if (err > len) {
264 pr_err("Discarded truncated rx packet: "
265 " len %d > %zd\n", err, len);
266 vhost_discard_vq_desc(vq);
267 continue;
268 }
269 len = err;
270 err = memcpy_toiovec(vq->hdr, (unsigned char *)&hdr, hdr_size);
271 if (err) {
272 vq_err(vq, "Unable to write vnet_hdr at addr %p: %d\n",
273 vq->iov->iov_base, err);
274 break;
275 }
276 len += hdr_size;
277 vhost_add_used_and_signal(&net->dev, vq, head, len);
278 if (unlikely(vq_log))
279 vhost_log_write(vq, vq_log, log, len);
280 total_len += len;
281 if (unlikely(total_len >= VHOST_NET_WEIGHT)) {
282 vhost_poll_queue(&vq->poll);
283 break;
284 }
285 }
286
287 mutex_unlock(&vq->mutex);
288 unuse_mm(net->dev.mm);
289}
290
291static void handle_tx_kick(struct work_struct *work)
292{
293 struct vhost_virtqueue *vq;
294 struct vhost_net *net;
295 vq = container_of(work, struct vhost_virtqueue, poll.work);
296 net = container_of(vq->dev, struct vhost_net, dev);
297 handle_tx(net);
298}
299
300static void handle_rx_kick(struct work_struct *work)
301{
302 struct vhost_virtqueue *vq;
303 struct vhost_net *net;
304 vq = container_of(work, struct vhost_virtqueue, poll.work);
305 net = container_of(vq->dev, struct vhost_net, dev);
306 handle_rx(net);
307}
308
309static void handle_tx_net(struct work_struct *work)
310{
311 struct vhost_net *net;
312 net = container_of(work, struct vhost_net, poll[VHOST_NET_VQ_TX].work);
313 handle_tx(net);
314}
315
316static void handle_rx_net(struct work_struct *work)
317{
318 struct vhost_net *net;
319 net = container_of(work, struct vhost_net, poll[VHOST_NET_VQ_RX].work);
320 handle_rx(net);
321}
322
323static int vhost_net_open(struct inode *inode, struct file *f)
324{
325 struct vhost_net *n = kmalloc(sizeof *n, GFP_KERNEL);
326 int r;
327 if (!n)
328 return -ENOMEM;
329 n->vqs[VHOST_NET_VQ_TX].handle_kick = handle_tx_kick;
330 n->vqs[VHOST_NET_VQ_RX].handle_kick = handle_rx_kick;
331 r = vhost_dev_init(&n->dev, n->vqs, VHOST_NET_VQ_MAX);
332 if (r < 0) {
333 kfree(n);
334 return r;
335 }
336
337 vhost_poll_init(n->poll + VHOST_NET_VQ_TX, handle_tx_net, POLLOUT);
338 vhost_poll_init(n->poll + VHOST_NET_VQ_RX, handle_rx_net, POLLIN);
339 n->tx_poll_state = VHOST_NET_POLL_DISABLED;
340
341 f->private_data = n;
342
343 return 0;
344}
345
346static void vhost_net_disable_vq(struct vhost_net *n,
347 struct vhost_virtqueue *vq)
348{
349 if (!vq->private_data)
350 return;
351 if (vq == n->vqs + VHOST_NET_VQ_TX) {
352 tx_poll_stop(n);
353 n->tx_poll_state = VHOST_NET_POLL_DISABLED;
354 } else
355 vhost_poll_stop(n->poll + VHOST_NET_VQ_RX);
356}
357
358static void vhost_net_enable_vq(struct vhost_net *n,
359 struct vhost_virtqueue *vq)
360{
361 struct socket *sock = vq->private_data;
362 if (!sock)
363 return;
364 if (vq == n->vqs + VHOST_NET_VQ_TX) {
365 n->tx_poll_state = VHOST_NET_POLL_STOPPED;
366 tx_poll_start(n, sock);
367 } else
368 vhost_poll_start(n->poll + VHOST_NET_VQ_RX, sock->file);
369}
370
371static struct socket *vhost_net_stop_vq(struct vhost_net *n,
372 struct vhost_virtqueue *vq)
373{
374 struct socket *sock;
375
376 mutex_lock(&vq->mutex);
377 sock = vq->private_data;
378 vhost_net_disable_vq(n, vq);
379 rcu_assign_pointer(vq->private_data, NULL);
380 mutex_unlock(&vq->mutex);
381 return sock;
382}
383
384static void vhost_net_stop(struct vhost_net *n, struct socket **tx_sock,
385 struct socket **rx_sock)
386{
387 *tx_sock = vhost_net_stop_vq(n, n->vqs + VHOST_NET_VQ_TX);
388 *rx_sock = vhost_net_stop_vq(n, n->vqs + VHOST_NET_VQ_RX);
389}
390
391static void vhost_net_flush_vq(struct vhost_net *n, int index)
392{
393 vhost_poll_flush(n->poll + index);
394 vhost_poll_flush(&n->dev.vqs[index].poll);
395}
396
397static void vhost_net_flush(struct vhost_net *n)
398{
399 vhost_net_flush_vq(n, VHOST_NET_VQ_TX);
400 vhost_net_flush_vq(n, VHOST_NET_VQ_RX);
401}
402
403static int vhost_net_release(struct inode *inode, struct file *f)
404{
405 struct vhost_net *n = f->private_data;
406 struct socket *tx_sock;
407 struct socket *rx_sock;
408
409 vhost_net_stop(n, &tx_sock, &rx_sock);
410 vhost_net_flush(n);
411 vhost_dev_cleanup(&n->dev);
412 if (tx_sock)
413 fput(tx_sock->file);
414 if (rx_sock)
415 fput(rx_sock->file);
416 /* We do an extra flush before freeing memory,
417 * since jobs can re-queue themselves. */
418 vhost_net_flush(n);
419 kfree(n);
420 return 0;
421}
422
423static struct socket *get_raw_socket(int fd)
424{
425 struct {
426 struct sockaddr_ll sa;
427 char buf[MAX_ADDR_LEN];
428 } uaddr;
429 int uaddr_len = sizeof uaddr, r;
430 struct socket *sock = sockfd_lookup(fd, &r);
431 if (!sock)
432 return ERR_PTR(-ENOTSOCK);
433
434 /* Parameter checking */
435 if (sock->sk->sk_type != SOCK_RAW) {
436 r = -ESOCKTNOSUPPORT;
437 goto err;
438 }
439
440 r = sock->ops->getname(sock, (struct sockaddr *)&uaddr.sa,
441 &uaddr_len, 0);
442 if (r)
443 goto err;
444
445 if (uaddr.sa.sll_family != AF_PACKET) {
446 r = -EPFNOSUPPORT;
447 goto err;
448 }
449 return sock;
450err:
451 fput(sock->file);
452 return ERR_PTR(r);
453}
454
455static struct socket *get_tun_socket(int fd)
456{
457 struct file *file = fget(fd);
458 struct socket *sock;
459 if (!file)
460 return ERR_PTR(-EBADF);
461 sock = tun_get_socket(file);
462 if (IS_ERR(sock))
463 fput(file);
464 return sock;
465}
466
467static struct socket *get_socket(int fd)
468{
469 struct socket *sock;
470 /* special case to disable backend */
471 if (fd == -1)
472 return NULL;
473 sock = get_raw_socket(fd);
474 if (!IS_ERR(sock))
475 return sock;
476 sock = get_tun_socket(fd);
477 if (!IS_ERR(sock))
478 return sock;
479 return ERR_PTR(-ENOTSOCK);
480}
481
482static long vhost_net_set_backend(struct vhost_net *n, unsigned index, int fd)
483{
484 struct socket *sock, *oldsock;
485 struct vhost_virtqueue *vq;
486 int r;
487
488 mutex_lock(&n->dev.mutex);
489 r = vhost_dev_check_owner(&n->dev);
490 if (r)
491 goto err;
492
493 if (index >= VHOST_NET_VQ_MAX) {
494 r = -ENOBUFS;
495 goto err;
496 }
497 vq = n->vqs + index;
498 mutex_lock(&vq->mutex);
499
500 /* Verify that ring has been setup correctly. */
501 if (!vhost_vq_access_ok(vq)) {
502 r = -EFAULT;
503 goto err;
504 }
505 sock = get_socket(fd);
506 if (IS_ERR(sock)) {
507 r = PTR_ERR(sock);
508 goto err;
509 }
510
511 /* start polling new socket */
512 oldsock = vq->private_data;
513 if (sock == oldsock)
514 goto done;
515
516 vhost_net_disable_vq(n, vq);
517 rcu_assign_pointer(vq->private_data, sock);
518 vhost_net_enable_vq(n, vq);
519 mutex_unlock(&vq->mutex);
520done:
521 if (oldsock) {
522 vhost_net_flush_vq(n, index);
523 fput(oldsock->file);
524 }
525err:
526 mutex_unlock(&n->dev.mutex);
527 return r;
528}
529
530static long vhost_net_reset_owner(struct vhost_net *n)
531{
532 struct socket *tx_sock = NULL;
533 struct socket *rx_sock = NULL;
534 long err;
535 mutex_lock(&n->dev.mutex);
536 err = vhost_dev_check_owner(&n->dev);
537 if (err)
538 goto done;
539 vhost_net_stop(n, &tx_sock, &rx_sock);
540 vhost_net_flush(n);
541 err = vhost_dev_reset_owner(&n->dev);
542done:
543 mutex_unlock(&n->dev.mutex);
544 if (tx_sock)
545 fput(tx_sock->file);
546 if (rx_sock)
547 fput(rx_sock->file);
548 return err;
549}
550
551static int vhost_net_set_features(struct vhost_net *n, u64 features)
552{
553 size_t hdr_size = features & (1 << VHOST_NET_F_VIRTIO_NET_HDR) ?
554 sizeof(struct virtio_net_hdr) : 0;
555 int i;
556 mutex_lock(&n->dev.mutex);
557 if ((features & (1 << VHOST_F_LOG_ALL)) &&
558 !vhost_log_access_ok(&n->dev)) {
559 mutex_unlock(&n->dev.mutex);
560 return -EFAULT;
561 }
562 n->dev.acked_features = features;
563 smp_wmb();
564 for (i = 0; i < VHOST_NET_VQ_MAX; ++i) {
565 mutex_lock(&n->vqs[i].mutex);
566 n->vqs[i].hdr_size = hdr_size;
567 mutex_unlock(&n->vqs[i].mutex);
568 }
569 vhost_net_flush(n);
570 mutex_unlock(&n->dev.mutex);
571 return 0;
572}
573
574static long vhost_net_ioctl(struct file *f, unsigned int ioctl,
575 unsigned long arg)
576{
577 struct vhost_net *n = f->private_data;
578 void __user *argp = (void __user *)arg;
579 u64 __user *featurep = argp;
580 struct vhost_vring_file backend;
581 u64 features;
582 int r;
583 switch (ioctl) {
584 case VHOST_NET_SET_BACKEND:
585 r = copy_from_user(&backend, argp, sizeof backend);
586 if (r < 0)
587 return r;
588 return vhost_net_set_backend(n, backend.index, backend.fd);
589 case VHOST_GET_FEATURES:
590 features = VHOST_FEATURES;
591 return copy_to_user(featurep, &features, sizeof features);
592 case VHOST_SET_FEATURES:
593 r = copy_from_user(&features, featurep, sizeof features);
594 if (r < 0)
595 return r;
596 if (features & ~VHOST_FEATURES)
597 return -EOPNOTSUPP;
598 return vhost_net_set_features(n, features);
599 case VHOST_RESET_OWNER:
600 return vhost_net_reset_owner(n);
601 default:
602 mutex_lock(&n->dev.mutex);
603 r = vhost_dev_ioctl(&n->dev, ioctl, arg);
604 vhost_net_flush(n);
605 mutex_unlock(&n->dev.mutex);
606 return r;
607 }
608}
609
610#ifdef CONFIG_COMPAT
611static long vhost_net_compat_ioctl(struct file *f, unsigned int ioctl,
612 unsigned long arg)
613{
614 return vhost_net_ioctl(f, ioctl, (unsigned long)compat_ptr(arg));
615}
616#endif
617
618const static struct file_operations vhost_net_fops = {
619 .owner = THIS_MODULE,
620 .release = vhost_net_release,
621 .unlocked_ioctl = vhost_net_ioctl,
622#ifdef CONFIG_COMPAT
623 .compat_ioctl = vhost_net_compat_ioctl,
624#endif
625 .open = vhost_net_open,
626};
627
628static struct miscdevice vhost_net_misc = {
629 VHOST_NET_MINOR,
630 "vhost-net",
631 &vhost_net_fops,
632};
633
634int vhost_net_init(void)
635{
636 int r = vhost_init();
637 if (r)
638 goto err_init;
639 r = misc_register(&vhost_net_misc);
640 if (r)
641 goto err_reg;
642 return 0;
643err_reg:
644 vhost_cleanup();
645err_init:
646 return r;
647
648}
649module_init(vhost_net_init);
650
651void vhost_net_exit(void)
652{
653 misc_deregister(&vhost_net_misc);
654 vhost_cleanup();
655}
656module_exit(vhost_net_exit);
657
658MODULE_VERSION("0.0.1");
659MODULE_LICENSE("GPL v2");
660MODULE_AUTHOR("Michael S. Tsirkin");
661MODULE_DESCRIPTION("Host kernel accelerator for virtio net");
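
vhost_net_ioctl() above accepts VHOST_NET_SET_BACKEND with a struct vhost_vring_file naming the queue (VHOST_NET_VQ_RX = 0, VHOST_NET_VQ_TX = 1 per the enum at the top of the file) and a socket fd; get_socket() takes either an AF_PACKET raw socket or a tun/tap fd, and fd == -1 detaches the backend. A hedged sketch of attaching an already-configured tap fd to both queues; the vring setup ioctls handled by vhost_dev_ioctl() are omitted here:

/* Sketch: point both virtqueues at a tap backend. tap_fd is assumed to
 * be an open, configured /dev/net/tun descriptor. */
#include <sys/ioctl.h>
#include <linux/vhost.h>

static int vhost_net_attach(int vhost, int tap_fd)
{
	struct vhost_vring_file backend;
	unsigned int i;

	for (i = 0; i < 2; i++) {	/* VHOST_NET_VQ_RX, VHOST_NET_VQ_TX */
		backend.index = i;
		backend.fd = tap_fd;	/* -1 would detach the backend */
		if (ioctl(vhost, VHOST_NET_SET_BACKEND, &backend) < 0)
			return -1;
	}
	return 0;
}
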
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
new file mode 100644
index 00000000000..c8c25dbc585
--- /dev/null
+++ b/drivers/vhost/vhost.c
@@ -0,0 +1,1098 @@
1/* Copyright (C) 2009 Red Hat, Inc.
2 * Copyright (C) 2006 Rusty Russell IBM Corporation
3 *
4 * Author: Michael S. Tsirkin <mst@redhat.com>
5 *
6 * Inspiration, some code, and most witty comments come from
7 * Documentation/lguest/lguest.c, by Rusty Russell
8 *
9 * This work is licensed under the terms of the GNU GPL, version 2.
10 *
11 * Generic code for virtio server in host kernel.
12 */
13
14#include <linux/eventfd.h>
15#include <linux/vhost.h>
16#include <linux/virtio_net.h>
17#include <linux/mm.h>
18#include <linux/miscdevice.h>
19#include <linux/mutex.h>
20#include <linux/workqueue.h>
21#include <linux/rcupdate.h>
22#include <linux/poll.h>
23#include <linux/file.h>
24#include <linux/highmem.h>
25
26#include <linux/net.h>
27#include <linux/if_packet.h>
28#include <linux/if_arp.h>
29
30#include <net/sock.h>
31
32#include "vhost.h"
33
34enum {
35 VHOST_MEMORY_MAX_NREGIONS = 64,
36 VHOST_MEMORY_F_LOG = 0x1,
37};
38
39static struct workqueue_struct *vhost_workqueue;
40
41static void vhost_poll_func(struct file *file, wait_queue_head_t *wqh,
42 poll_table *pt)
43{
44 struct vhost_poll *poll;
45 poll = container_of(pt, struct vhost_poll, table);
46
47 poll->wqh = wqh;
48 add_wait_queue(wqh, &poll->wait);
49}
50
51static int vhost_poll_wakeup(wait_queue_t *wait, unsigned mode, int sync,
52 void *key)
53{
54 struct vhost_poll *poll;
55 poll = container_of(wait, struct vhost_poll, wait);
56 if (!((unsigned long)key & poll->mask))
57 return 0;
58
59 queue_work(vhost_workqueue, &poll->work);
60 return 0;
61}
62
63/* Init poll structure */
64void vhost_poll_init(struct vhost_poll *poll, work_func_t func,
65 unsigned long mask)
66{
67 INIT_WORK(&poll->work, func);
68 init_waitqueue_func_entry(&poll->wait, vhost_poll_wakeup);
69 init_poll_funcptr(&poll->table, vhost_poll_func);
70 poll->mask = mask;
71}
72
73/* Start polling a file. We add ourselves to file's wait queue. The caller must
74 * keep a reference to a file until after vhost_poll_stop is called. */
75void vhost_poll_start(struct vhost_poll *poll, struct file *file)
76{
77 unsigned long mask;
78 mask = file->f_op->poll(file, &poll->table);
79 if (mask)
80 vhost_poll_wakeup(&poll->wait, 0, 0, (void *)mask);
81}
82
83/* Stop polling a file. After this function returns, it becomes safe to drop the
84 * file reference. You must also flush afterwards. */
85void vhost_poll_stop(struct vhost_poll *poll)
86{
87 remove_wait_queue(poll->wqh, &poll->wait);
88}
89
90/* Flush any work that has been scheduled. When calling this, don't hold any
91 * locks that are also used by the callback. */
92void vhost_poll_flush(struct vhost_poll *poll)
93{
94 flush_work(&poll->work);
95}
96
97void vhost_poll_queue(struct vhost_poll *poll)
98{
99 queue_work(vhost_workqueue, &poll->work);
100}
101
102static void vhost_vq_reset(struct vhost_dev *dev,
103 struct vhost_virtqueue *vq)
104{
105 vq->num = 1;
106 vq->desc = NULL;
107 vq->avail = NULL;
108 vq->used = NULL;
109 vq->last_avail_idx = 0;
110 vq->avail_idx = 0;
111 vq->last_used_idx = 0;
112 vq->used_flags = 0;
113 vq->used_flags = 0;
114 vq->log_used = false;
115 vq->log_addr = -1ull;
116 vq->hdr_size = 0;
117 vq->private_data = NULL;
118 vq->log_base = NULL;
119 vq->error_ctx = NULL;
120 vq->error = NULL;
121 vq->kick = NULL;
122 vq->call_ctx = NULL;
123 vq->call = NULL;
124}
125
126long vhost_dev_init(struct vhost_dev *dev,
127 struct vhost_virtqueue *vqs, int nvqs)
128{
129 int i;
130 dev->vqs = vqs;
131 dev->nvqs = nvqs;
132 mutex_init(&dev->mutex);
133 dev->log_ctx = NULL;
134 dev->log_file = NULL;
135 dev->memory = NULL;
136 dev->mm = NULL;
137
138 for (i = 0; i < dev->nvqs; ++i) {
139 dev->vqs[i].dev = dev;
140 mutex_init(&dev->vqs[i].mutex);
141 vhost_vq_reset(dev, dev->vqs + i);
142 if (dev->vqs[i].handle_kick)
143 vhost_poll_init(&dev->vqs[i].poll,
144 dev->vqs[i].handle_kick,
145 POLLIN);
146 }
147 return 0;
148}
149
150/* Caller should have device mutex */
151long vhost_dev_check_owner(struct vhost_dev *dev)
152{
153 /* Are you the owner? If not, I don't think you mean to do that */
154 return dev->mm == current->mm ? 0 : -EPERM;
155}
156
157/* Caller should have device mutex */
158static long vhost_dev_set_owner(struct vhost_dev *dev)
159{
160 /* Is there an owner already? */
161 if (dev->mm)
162 return -EBUSY;
163 /* No owner, become one */
164 dev->mm = get_task_mm(current);
165 return 0;
166}
167
168/* Caller should have device mutex */
169long vhost_dev_reset_owner(struct vhost_dev *dev)
170{
171 struct vhost_memory *memory;
172
173 /* Restore memory to default empty mapping. */
174 memory = kmalloc(offsetof(struct vhost_memory, regions), GFP_KERNEL);
175 if (!memory)
176 return -ENOMEM;
177
178 vhost_dev_cleanup(dev);
179
180 memory->nregions = 0;
181 dev->memory = memory;
182 return 0;
183}
184
185/* Caller should have device mutex */
186void vhost_dev_cleanup(struct vhost_dev *dev)
187{
188 int i;
189 for (i = 0; i < dev->nvqs; ++i) {
190 if (dev->vqs[i].kick && dev->vqs[i].handle_kick) {
191 vhost_poll_stop(&dev->vqs[i].poll);
192 vhost_poll_flush(&dev->vqs[i].poll);
193 }
194 if (dev->vqs[i].error_ctx)
195 eventfd_ctx_put(dev->vqs[i].error_ctx);
196 if (dev->vqs[i].error)
197 fput(dev->vqs[i].error);
198 if (dev->vqs[i].kick)
199 fput(dev->vqs[i].kick);
200 if (dev->vqs[i].call_ctx)
201 eventfd_ctx_put(dev->vqs[i].call_ctx);
202 if (dev->vqs[i].call)
203 fput(dev->vqs[i].call);
204 vhost_vq_reset(dev, dev->vqs + i);
205 }
206 if (dev->log_ctx)
207 eventfd_ctx_put(dev->log_ctx);
208 dev->log_ctx = NULL;
209 if (dev->log_file)
210 fput(dev->log_file);
211 dev->log_file = NULL;
212 /* No one will access memory at this point */
213 kfree(dev->memory);
214 dev->memory = NULL;
215 if (dev->mm)
216 mmput(dev->mm);
217 dev->mm = NULL;
218}
219
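/* The dirty log is a userspace bitmap with one bit per VHOST_PAGE_SIZE page
 * of guest memory; check that the bytes covering addr..addr+sz are writable. */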
220static int log_access_ok(void __user *log_base, u64 addr, unsigned long sz)
221{
222 u64 a = addr / VHOST_PAGE_SIZE / 8;
223 /* Make sure 64 bit math will not overflow. */
224 if (a > ULONG_MAX - (unsigned long)log_base ||
225 a + (unsigned long)log_base > ULONG_MAX)
226 return -EFAULT;
227
228 return access_ok(VERIFY_WRITE, log_base + a,
229 (sz + VHOST_PAGE_SIZE * 8 - 1) / VHOST_PAGE_SIZE / 8);
230}
231
232/* Caller should have vq mutex and device mutex. */
233static int vq_memory_access_ok(void __user *log_base, struct vhost_memory *mem,
234 int log_all)
235{
236 int i;
237 for (i = 0; i < mem->nregions; ++i) {
238 struct vhost_memory_region *m = mem->regions + i;
239 unsigned long a = m->userspace_addr;
240 if (m->memory_size > ULONG_MAX)
241 return 0;
242 else if (!access_ok(VERIFY_WRITE, (void __user *)a,
243 m->memory_size))
244 return 0;
245 else if (log_all && !log_access_ok(log_base,
246 m->guest_phys_addr,
247 m->memory_size))
248 return 0;
249 }
250 return 1;
251}
252
253/* Can we switch to this memory table? */
254/* Caller should have device mutex but not vq mutex */
255static int memory_access_ok(struct vhost_dev *d, struct vhost_memory *mem,
256 int log_all)
257{
258 int i;
259 for (i = 0; i < d->nvqs; ++i) {
260 int ok;
261 mutex_lock(&d->vqs[i].mutex);
262 /* If ring is inactive, will check when it's enabled. */
263 if (d->vqs[i].private_data)
264 ok = vq_memory_access_ok(d->vqs[i].log_base, mem,
265 log_all);
266 else
267 ok = 1;
268 mutex_unlock(&d->vqs[i].mutex);
269 if (!ok)
270 return 0;
271 }
272 return 1;
273}
274
275static int vq_access_ok(unsigned int num,
276 struct vring_desc __user *desc,
277 struct vring_avail __user *avail,
278 struct vring_used __user *used)
279{
280 return access_ok(VERIFY_READ, desc, num * sizeof *desc) &&
281 access_ok(VERIFY_READ, avail,
282 sizeof *avail + num * sizeof *avail->ring) &&
283 access_ok(VERIFY_WRITE, used,
284 sizeof *used + num * sizeof *used->ring);
285}
286
287/* Can we log writes? */
288/* Caller should have device mutex but not vq mutex */
289int vhost_log_access_ok(struct vhost_dev *dev)
290{
291 return memory_access_ok(dev, dev->memory, 1);
292}
293
294/* Verify access for write logging. */
295/* Caller should have vq mutex and device mutex */
296static int vq_log_access_ok(struct vhost_virtqueue *vq, void __user *log_base)
297{
298 return vq_memory_access_ok(log_base, vq->dev->memory,
299 vhost_has_feature(vq->dev, VHOST_F_LOG_ALL)) &&
300 (!vq->log_used || log_access_ok(log_base, vq->log_addr,
301 sizeof *vq->used +
302 vq->num * sizeof *vq->used->ring));
303}
304
305/* Can we start vq? */
306/* Caller should have vq mutex and device mutex */
307int vhost_vq_access_ok(struct vhost_virtqueue *vq)
308{
309 return vq_access_ok(vq->num, vq->desc, vq->avail, vq->used) &&
310 vq_log_access_ok(vq, vq->log_base);
311}
312
313static long vhost_set_memory(struct vhost_dev *d, struct vhost_memory __user *m)
314{
315 struct vhost_memory mem, *newmem, *oldmem;
316 unsigned long size = offsetof(struct vhost_memory, regions);
317 long r;
318 r = copy_from_user(&mem, m, size);
319 if (r)
320		return -EFAULT;
321 if (mem.padding)
322 return -EOPNOTSUPP;
323 if (mem.nregions > VHOST_MEMORY_MAX_NREGIONS)
324 return -E2BIG;
325 newmem = kmalloc(size + mem.nregions * sizeof *m->regions, GFP_KERNEL);
326 if (!newmem)
327 return -ENOMEM;
328
329 memcpy(newmem, &mem, size);
330 r = copy_from_user(newmem->regions, m->regions,
331 mem.nregions * sizeof *m->regions);
332 if (r) {
333 kfree(newmem);
334		return -EFAULT;
335 }
336
337	if (!memory_access_ok(d, newmem, vhost_has_feature(d, VHOST_F_LOG_ALL))) {
		kfree(newmem);
338		return -EFAULT;
	}
339 oldmem = d->memory;
340 rcu_assign_pointer(d->memory, newmem);
341 synchronize_rcu();
342 kfree(oldmem);
343 return 0;
344}
345
346static int init_used(struct vhost_virtqueue *vq,
347 struct vring_used __user *used)
348{
349 int r = put_user(vq->used_flags, &used->flags);
350 if (r)
351 return r;
352 return get_user(vq->last_used_idx, &used->idx);
353}
354
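/* Handle the per-virtqueue ioctls: ring size and base, ring addresses, and
 * the kick/call/error eventfds. Caller must hold the device mutex. */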
355static long vhost_set_vring(struct vhost_dev *d, int ioctl, void __user *argp)
356{
357 struct file *eventfp, *filep = NULL,
358 *pollstart = NULL, *pollstop = NULL;
359 struct eventfd_ctx *ctx = NULL;
360 u32 __user *idxp = argp;
361 struct vhost_virtqueue *vq;
362 struct vhost_vring_state s;
363 struct vhost_vring_file f;
364 struct vhost_vring_addr a;
365 u32 idx;
366 long r;
367
368 r = get_user(idx, idxp);
369 if (r < 0)
370 return r;
371	if (idx >= d->nvqs)
372 return -ENOBUFS;
373
374 vq = d->vqs + idx;
375
376 mutex_lock(&vq->mutex);
377
378 switch (ioctl) {
379 case VHOST_SET_VRING_NUM:
380 /* Resizing ring with an active backend?
381 * You don't want to do that. */
382 if (vq->private_data) {
383 r = -EBUSY;
384 break;
385 }
386		r = copy_from_user(&s, argp, sizeof s) ? -EFAULT : 0;
387		if (r)
388			break;
389 if (!s.num || s.num > 0xffff || (s.num & (s.num - 1))) {
390 r = -EINVAL;
391 break;
392 }
393 vq->num = s.num;
394 break;
395 case VHOST_SET_VRING_BASE:
396 /* Moving base with an active backend?
397 * You don't want to do that. */
398 if (vq->private_data) {
399 r = -EBUSY;
400 break;
401 }
402		r = copy_from_user(&s, argp, sizeof s) ? -EFAULT : 0;
403		if (r)
404			break;
405 if (s.num > 0xffff) {
406 r = -EINVAL;
407 break;
408 }
409 vq->last_avail_idx = s.num;
410 /* Forget the cached index value. */
411 vq->avail_idx = vq->last_avail_idx;
412 break;
413 case VHOST_GET_VRING_BASE:
414 s.index = idx;
415 s.num = vq->last_avail_idx;
416		r = copy_to_user(argp, &s, sizeof s) ? -EFAULT : 0;
417 break;
418 case VHOST_SET_VRING_ADDR:
419		r = copy_from_user(&a, argp, sizeof a) ? -EFAULT : 0;
420		if (r)
421			break;
422 if (a.flags & ~(0x1 << VHOST_VRING_F_LOG)) {
423 r = -EOPNOTSUPP;
424 break;
425 }
426 /* For 32bit, verify that the top 32bits of the user
427 data are set to zero. */
428 if ((u64)(unsigned long)a.desc_user_addr != a.desc_user_addr ||
429 (u64)(unsigned long)a.used_user_addr != a.used_user_addr ||
430 (u64)(unsigned long)a.avail_user_addr != a.avail_user_addr) {
431 r = -EFAULT;
432 break;
433 }
434 if ((a.avail_user_addr & (sizeof *vq->avail->ring - 1)) ||
435 (a.used_user_addr & (sizeof *vq->used->ring - 1)) ||
436 (a.log_guest_addr & (sizeof *vq->used->ring - 1))) {
437 r = -EINVAL;
438 break;
439 }
440
441		/* We only verify access here if a backend is configured.
442		 * If it is not, we skip the check, since the ring size might
443		 * not have been set up yet; access will be verified when the
		 * backend is configured. */
444 if (vq->private_data) {
445 if (!vq_access_ok(vq->num,
446 (void __user *)(unsigned long)a.desc_user_addr,
447 (void __user *)(unsigned long)a.avail_user_addr,
448 (void __user *)(unsigned long)a.used_user_addr)) {
449 r = -EINVAL;
450 break;
451 }
452
453 /* Also validate log access for used ring if enabled. */
454 if ((a.flags & (0x1 << VHOST_VRING_F_LOG)) &&
455 !log_access_ok(vq->log_base, a.log_guest_addr,
456 sizeof *vq->used +
457 vq->num * sizeof *vq->used->ring)) {
458 r = -EINVAL;
459 break;
460 }
461 }
462
463 r = init_used(vq, (struct vring_used __user *)(unsigned long)
464 a.used_user_addr);
465 if (r)
466 break;
467 vq->log_used = !!(a.flags & (0x1 << VHOST_VRING_F_LOG));
468 vq->desc = (void __user *)(unsigned long)a.desc_user_addr;
469 vq->avail = (void __user *)(unsigned long)a.avail_user_addr;
470 vq->log_addr = a.log_guest_addr;
471 vq->used = (void __user *)(unsigned long)a.used_user_addr;
472 break;
473 case VHOST_SET_VRING_KICK:
474		r = copy_from_user(&f, argp, sizeof f) ? -EFAULT : 0;
475		if (r)
476			break;
477 eventfp = f.fd == -1 ? NULL : eventfd_fget(f.fd);
478		if (IS_ERR(eventfp)) {
479			r = PTR_ERR(eventfp);
			break;
		}
480 if (eventfp != vq->kick) {
481 pollstop = filep = vq->kick;
482 pollstart = vq->kick = eventfp;
483 } else
484 filep = eventfp;
485 break;
486 case VHOST_SET_VRING_CALL:
487		r = copy_from_user(&f, argp, sizeof f) ? -EFAULT : 0;
488		if (r)
489			break;
490 eventfp = f.fd == -1 ? NULL : eventfd_fget(f.fd);
491		if (IS_ERR(eventfp)) {
492			r = PTR_ERR(eventfp);
			break;
		}
493 if (eventfp != vq->call) {
494 filep = vq->call;
495 ctx = vq->call_ctx;
496 vq->call = eventfp;
497 vq->call_ctx = eventfp ?
498 eventfd_ctx_fileget(eventfp) : NULL;
499 } else
500 filep = eventfp;
501 break;
502 case VHOST_SET_VRING_ERR:
503		r = copy_from_user(&f, argp, sizeof f) ? -EFAULT : 0;
504		if (r)
505			break;
506 eventfp = f.fd == -1 ? NULL : eventfd_fget(f.fd);
507		if (IS_ERR(eventfp)) {
508			r = PTR_ERR(eventfp);
			break;
		}
509 if (eventfp != vq->error) {
510 filep = vq->error;
511 vq->error = eventfp;
512 ctx = vq->error_ctx;
513 vq->error_ctx = eventfp ?
514 eventfd_ctx_fileget(eventfp) : NULL;
515 } else
516 filep = eventfp;
517 break;
518 default:
519 r = -ENOIOCTLCMD;
520 }
521
522 if (pollstop && vq->handle_kick)
523 vhost_poll_stop(&vq->poll);
524
525 if (ctx)
526 eventfd_ctx_put(ctx);
527 if (filep)
528 fput(filep);
529
530 if (pollstart && vq->handle_kick)
531 vhost_poll_start(&vq->poll, vq->kick);
532
533 mutex_unlock(&vq->mutex);
534
535 if (pollstop && vq->handle_kick)
536 vhost_poll_flush(&vq->poll);
537 return r;
538}
539
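/*
 * Illustrative only (not part of this patch): a userspace client would
 * typically drive this ioctl interface roughly as follows:
 *
 *	ioctl(vhost_fd, VHOST_SET_OWNER, 0);
 *	ioctl(vhost_fd, VHOST_SET_MEM_TABLE, &mem);
 *	ioctl(vhost_fd, VHOST_SET_VRING_NUM, &state);
 *	ioctl(vhost_fd, VHOST_SET_VRING_ADDR, &addr);
 *	ioctl(vhost_fd, VHOST_SET_VRING_KICK, &kick_file);
 *	ioctl(vhost_fd, VHOST_SET_VRING_CALL, &call_file);
 *
 * VHOST_SET_OWNER must come first: every other ioctl goes through
 * vhost_dev_check_owner().
 */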
540/* Caller must have device mutex */
541long vhost_dev_ioctl(struct vhost_dev *d, unsigned int ioctl, unsigned long arg)
542{
543 void __user *argp = (void __user *)arg;
544 struct file *eventfp, *filep = NULL;
545 struct eventfd_ctx *ctx = NULL;
546 u64 p;
547 long r;
548 int i, fd;
549
550 /* If you are not the owner, you can become one */
551 if (ioctl == VHOST_SET_OWNER) {
552 r = vhost_dev_set_owner(d);
553 goto done;
554 }
555
556 /* You must be the owner to do anything else */
557 r = vhost_dev_check_owner(d);
558 if (r)
559 goto done;
560
561 switch (ioctl) {
562 case VHOST_SET_MEM_TABLE:
563 r = vhost_set_memory(d, argp);
564 break;
565 case VHOST_SET_LOG_BASE:
566		r = copy_from_user(&p, argp, sizeof p) ? -EFAULT : 0;
567		if (r)
568			break;
569 if ((u64)(unsigned long)p != p) {
570 r = -EFAULT;
571 break;
572 }
573 for (i = 0; i < d->nvqs; ++i) {
574 struct vhost_virtqueue *vq;
575 void __user *base = (void __user *)(unsigned long)p;
576 vq = d->vqs + i;
577 mutex_lock(&vq->mutex);
578 /* If ring is inactive, will check when it's enabled. */
579 if (vq->private_data && !vq_log_access_ok(vq, base))
580 r = -EFAULT;
581 else
582 vq->log_base = base;
583 mutex_unlock(&vq->mutex);
584 }
585 break;
586 case VHOST_SET_LOG_FD:
587 r = get_user(fd, (int __user *)argp);
588 if (r < 0)
589 break;
590 eventfp = fd == -1 ? NULL : eventfd_fget(fd);
591 if (IS_ERR(eventfp)) {
592 r = PTR_ERR(eventfp);
593 break;
594 }
595 if (eventfp != d->log_file) {
596 filep = d->log_file;
597 ctx = d->log_ctx;
598 d->log_ctx = eventfp ?
599 eventfd_ctx_fileget(eventfp) : NULL;
600 } else
601 filep = eventfp;
602 for (i = 0; i < d->nvqs; ++i) {
603 mutex_lock(&d->vqs[i].mutex);
604 d->vqs[i].log_ctx = d->log_ctx;
605 mutex_unlock(&d->vqs[i].mutex);
606 }
607 if (ctx)
608 eventfd_ctx_put(ctx);
609 if (filep)
610 fput(filep);
611 break;
612 default:
613 r = vhost_set_vring(d, ioctl, argp);
614 break;
615 }
616done:
617 return r;
618}
619
620static const struct vhost_memory_region *find_region(struct vhost_memory *mem,
621 __u64 addr, __u32 len)
622{
623 struct vhost_memory_region *reg;
624 int i;
625 /* linear search is not brilliant, but we really have on the order of 6
626 * regions in practice */
627 for (i = 0; i < mem->nregions; ++i) {
628 reg = mem->regions + i;
629 if (reg->guest_phys_addr <= addr &&
630 reg->guest_phys_addr + reg->memory_size - 1 >= addr)
631 return reg;
632 }
633 return NULL;
634}
635
636/* TODO: This is really inefficient. We need something like get_user()
637 * (instruction directly accesses the data, with an exception table entry
638 * returning -EFAULT). See Documentation/x86/exception-tables.txt.
639 */
640static int set_bit_to_user(int nr, void __user *addr)
641{
642 unsigned long log = (unsigned long)addr;
643 struct page *page;
644 void *base;
645 int bit = nr + (log % PAGE_SIZE) * 8;
646 int r;
647 r = get_user_pages_fast(log, 1, 1, &page);
648	if (r < 0)
649		return r;
	BUG_ON(r != 1);
650 base = kmap_atomic(page, KM_USER0);
651 set_bit(bit, base);
652 kunmap_atomic(base, KM_USER0);
653 set_page_dirty_lock(page);
654 put_page(page);
655 return 0;
656}
657
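/* Set the dirty-log bits covering [write_address, write_address + write_length),
 * one bit per VHOST_PAGE_SIZE page of guest memory. */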
658static int log_write(void __user *log_base,
659 u64 write_address, u64 write_length)
660{
661 int r;
662 if (!write_length)
663 return 0;
664 write_address /= VHOST_PAGE_SIZE;
665 for (;;) {
666 u64 base = (u64)(unsigned long)log_base;
667 u64 log = base + write_address / 8;
668 int bit = write_address % 8;
669 if ((u64)(unsigned long)log != log)
670 return -EFAULT;
671 r = set_bit_to_user(bit, (void __user *)(unsigned long)log);
672 if (r < 0)
673 return r;
674 if (write_length <= VHOST_PAGE_SIZE)
675 break;
676 write_length -= VHOST_PAGE_SIZE;
677 write_address += VHOST_PAGE_SIZE;
678 }
679 return r;
680}
681
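/* Write out the dirty-log entries gathered by vhost_get_vq_desc() for a
 * completed buffer of len bytes, then signal the log eventfd so userspace
 * can harvest the bitmap. */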
682int vhost_log_write(struct vhost_virtqueue *vq, struct vhost_log *log,
683 unsigned int log_num, u64 len)
684{
685 int i, r;
686
687 /* Make sure data written is seen before log. */
688 wmb();
689 for (i = 0; i < log_num; ++i) {
690 u64 l = min(log[i].len, len);
691 r = log_write(vq->log_base, log[i].addr, l);
692 if (r < 0)
693 return r;
694		len -= l;
695		if (!len) {
			if (vq->log_ctx)
				eventfd_signal(vq->log_ctx, 1);
			return 0;
		}
697	}
700	/* Length written exceeds what we have stored. This is a bug. */
701	BUG();
702 return 0;
703}
704
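/* Translate a guest-physical range [addr, addr + len) into host userspace
 * iovecs using the device memory table. Returns the number of iovec entries
 * used, or a negative errno. */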
705int translate_desc(struct vhost_dev *dev, u64 addr, u32 len,
706 struct iovec iov[], int iov_size)
707{
708 const struct vhost_memory_region *reg;
709 struct vhost_memory *mem;
710 struct iovec *_iov;
711 u64 s = 0;
712 int ret = 0;
713
714 rcu_read_lock();
715
716 mem = rcu_dereference(dev->memory);
717 while ((u64)len > s) {
718 u64 size;
719 if (ret >= iov_size) {
720 ret = -ENOBUFS;
721 break;
722 }
723 reg = find_region(mem, addr, len);
724 if (!reg) {
725 ret = -EFAULT;
726 break;
727 }
728 _iov = iov + ret;
729 size = reg->memory_size - addr + reg->guest_phys_addr;
730		_iov->iov_len = min((u64)len - s, size);
731 _iov->iov_base = (void *)(unsigned long)
732 (reg->userspace_addr + addr - reg->guest_phys_addr);
733 s += size;
734 addr += size;
735 ++ret;
736 }
737
738 rcu_read_unlock();
739 return ret;
740}
741
742/* Each buffer in the virtqueues is actually a chain of descriptors. This
743 * function returns the next descriptor in the chain,
744 * or -1U if we're at the end. */
745static unsigned next_desc(struct vring_desc *desc)
746{
747 unsigned int next;
748
749 /* If this descriptor says it doesn't chain, we're done. */
750 if (!(desc->flags & VRING_DESC_F_NEXT))
751 return -1U;
752
753	/* Check they're not leading us off the end of the descriptors. */
754 next = desc->next;
755 /* Make sure compiler knows to grab that: we don't want it changing! */
756 /* We will use the result as an index in an array, so most
757 * architectures only need a compiler barrier here. */
758 read_barrier_depends();
759
760 return next;
761}
762
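/* Fetch an indirect descriptor table and translate each of its entries into
 * the caller's iovec array, enforcing the usual out-before-in ordering. */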
763static unsigned get_indirect(struct vhost_dev *dev, struct vhost_virtqueue *vq,
764 struct iovec iov[], unsigned int iov_size,
765 unsigned int *out_num, unsigned int *in_num,
766 struct vhost_log *log, unsigned int *log_num,
767 struct vring_desc *indirect)
768{
769 struct vring_desc desc;
770 unsigned int i = 0, count, found = 0;
771 int ret;
772
773 /* Sanity check */
774 if (indirect->len % sizeof desc) {
775 vq_err(vq, "Invalid length in indirect descriptor: "
776 "len 0x%llx not multiple of 0x%zx\n",
777 (unsigned long long)indirect->len,
778 sizeof desc);
779 return -EINVAL;
780 }
781
782 ret = translate_desc(dev, indirect->addr, indirect->len, vq->indirect,
783 ARRAY_SIZE(vq->indirect));
784 if (ret < 0) {
785 vq_err(vq, "Translation failure %d in indirect.\n", ret);
786 return ret;
787 }
788
789 /* We will use the result as an address to read from, so most
790 * architectures only need a compiler barrier here. */
791 read_barrier_depends();
792
793 count = indirect->len / sizeof desc;
794 /* Buffers are chained via a 16 bit next field, so
795 * we can have at most 2^16 of these. */
796 if (count > USHORT_MAX + 1) {
797 vq_err(vq, "Indirect buffer length too big: %d\n",
798 indirect->len);
799 return -E2BIG;
800 }
801
802 do {
803 unsigned iov_count = *in_num + *out_num;
804 if (++found > count) {
805 vq_err(vq, "Loop detected: last one at %u "
806 "indirect size %u\n",
807 i, count);
808 return -EINVAL;
809 }
810 if (memcpy_fromiovec((unsigned char *)&desc, vq->indirect,
811 sizeof desc)) {
812 vq_err(vq, "Failed indirect descriptor: idx %d, %zx\n",
813 i, (size_t)indirect->addr + i * sizeof desc);
814 return -EINVAL;
815 }
816 if (desc.flags & VRING_DESC_F_INDIRECT) {
817 vq_err(vq, "Nested indirect descriptor: idx %d, %zx\n",
818 i, (size_t)indirect->addr + i * sizeof desc);
819 return -EINVAL;
820 }
821
822 ret = translate_desc(dev, desc.addr, desc.len, iov + iov_count,
823 iov_size - iov_count);
824 if (ret < 0) {
825 vq_err(vq, "Translation failure %d indirect idx %d\n",
826 ret, i);
827 return ret;
828 }
829 /* If this is an input descriptor, increment that count. */
830 if (desc.flags & VRING_DESC_F_WRITE) {
831 *in_num += ret;
832 if (unlikely(log)) {
833 log[*log_num].addr = desc.addr;
834 log[*log_num].len = desc.len;
835 ++*log_num;
836 }
837 } else {
838 /* If it's an output descriptor, they're all supposed
839 * to come before any input descriptors. */
840 if (*in_num) {
841 vq_err(vq, "Indirect descriptor "
842 "has out after in: idx %d\n", i);
843 return -EINVAL;
844 }
845 *out_num += ret;
846 }
847 } while ((i = next_desc(&desc)) != -1);
848 return 0;
849}
850
851/* This looks in the virtqueue for the first available buffer, and converts
852 * it to an iovec for convenient access. Since descriptors consist of some
853 * number of output then some number of input descriptors, it's actually two
854 * iovecs, but we pack them into one and note how many of each there were.
855 *
856 * This function returns the descriptor number found, or vq->num (which
857 * is never a valid descriptor number) if none was found. */
858unsigned vhost_get_vq_desc(struct vhost_dev *dev, struct vhost_virtqueue *vq,
859 struct iovec iov[], unsigned int iov_size,
860 unsigned int *out_num, unsigned int *in_num,
861 struct vhost_log *log, unsigned int *log_num)
862{
863 struct vring_desc desc;
864 unsigned int i, head, found = 0;
865 u16 last_avail_idx;
866 int ret;
867
868 /* Check it isn't doing very strange things with descriptor numbers. */
869 last_avail_idx = vq->last_avail_idx;
870 if (get_user(vq->avail_idx, &vq->avail->idx)) {
871 vq_err(vq, "Failed to access avail idx at %p\n",
872 &vq->avail->idx);
873 return vq->num;
874 }
875
876 if ((u16)(vq->avail_idx - last_avail_idx) > vq->num) {
877 vq_err(vq, "Guest moved used index from %u to %u",
878 last_avail_idx, vq->avail_idx);
879 return vq->num;
880 }
881
882 /* If there's nothing new since last we looked, return invalid. */
883 if (vq->avail_idx == last_avail_idx)
884 return vq->num;
885
886 /* Only get avail ring entries after they have been exposed by guest. */
887 rmb();
888
889 /* Grab the next descriptor number they're advertising, and increment
890 * the index we've seen. */
891 if (get_user(head, &vq->avail->ring[last_avail_idx % vq->num])) {
892 vq_err(vq, "Failed to read head: idx %d address %p\n",
893 last_avail_idx,
894 &vq->avail->ring[last_avail_idx % vq->num]);
895 return vq->num;
896 }
897
898 /* If their number is silly, that's an error. */
899 if (head >= vq->num) {
900 vq_err(vq, "Guest says index %u > %u is available",
901 head, vq->num);
902 return vq->num;
903 }
904
905	/* When we start there are neither input nor output descriptors. */
906 *out_num = *in_num = 0;
907 if (unlikely(log))
908 *log_num = 0;
909
910 i = head;
911 do {
912 unsigned iov_count = *in_num + *out_num;
913 if (i >= vq->num) {
914 vq_err(vq, "Desc index is %u > %u, head = %u",
915 i, vq->num, head);
916 return vq->num;
917 }
918 if (++found > vq->num) {
919 vq_err(vq, "Loop detected: last one at %u "
920 "vq size %u head %u\n",
921 i, vq->num, head);
922 return vq->num;
923 }
924 ret = copy_from_user(&desc, vq->desc + i, sizeof desc);
925 if (ret) {
926 vq_err(vq, "Failed to get descriptor: idx %d addr %p\n",
927 i, vq->desc + i);
928 return vq->num;
929 }
930 if (desc.flags & VRING_DESC_F_INDIRECT) {
931 ret = get_indirect(dev, vq, iov, iov_size,
932 out_num, in_num,
933 log, log_num, &desc);
934 if (ret < 0) {
935 vq_err(vq, "Failure detected "
936 "in indirect descriptor at idx %d\n", i);
937 return vq->num;
938 }
939 continue;
940 }
941
942 ret = translate_desc(dev, desc.addr, desc.len, iov + iov_count,
943 iov_size - iov_count);
944 if (ret < 0) {
945 vq_err(vq, "Translation failure %d descriptor idx %d\n",
946 ret, i);
947 return vq->num;
948 }
949 if (desc.flags & VRING_DESC_F_WRITE) {
950 /* If this is an input descriptor,
951 * increment that count. */
952 *in_num += ret;
953 if (unlikely(log)) {
954 log[*log_num].addr = desc.addr;
955 log[*log_num].len = desc.len;
956 ++*log_num;
957 }
958 } else {
959 /* If it's an output descriptor, they're all supposed
960 * to come before any input descriptors. */
961 if (*in_num) {
962 vq_err(vq, "Descriptor has out after in: "
963 "idx %d\n", i);
964 return vq->num;
965 }
966 *out_num += ret;
967 }
968 } while ((i = next_desc(&desc)) != -1);
969
970 /* On success, increment avail index. */
971 vq->last_avail_idx++;
972 return head;
973}
974
975/* Reverse the effect of vhost_get_vq_desc. Useful for error handling. */
976void vhost_discard_vq_desc(struct vhost_virtqueue *vq)
977{
978 vq->last_avail_idx--;
979}
980
981/* After we've used one of their buffers, we tell them about it. We'll then
982 * want to notify the guest, using eventfd. */
983int vhost_add_used(struct vhost_virtqueue *vq, unsigned int head, int len)
984{
985 struct vring_used_elem *used;
986
987 /* The virtqueue contains a ring of used buffers. Get a pointer to the
988 * next entry in that used ring. */
989 used = &vq->used->ring[vq->last_used_idx % vq->num];
990 if (put_user(head, &used->id)) {
991 vq_err(vq, "Failed to write used id");
992 return -EFAULT;
993 }
994 if (put_user(len, &used->len)) {
995 vq_err(vq, "Failed to write used len");
996 return -EFAULT;
997 }
998 /* Make sure buffer is written before we update index. */
999 wmb();
1000 if (put_user(vq->last_used_idx + 1, &vq->used->idx)) {
1001 vq_err(vq, "Failed to increment used idx");
1002 return -EFAULT;
1003 }
1004 if (unlikely(vq->log_used)) {
1005 /* Make sure data is seen before log. */
1006 wmb();
1007 log_write(vq->log_base, vq->log_addr + sizeof *vq->used->ring *
1008 (vq->last_used_idx % vq->num),
1009 sizeof *vq->used->ring);
1010 log_write(vq->log_base, vq->log_addr, sizeof *vq->used->ring);
1011 if (vq->log_ctx)
1012 eventfd_signal(vq->log_ctx, 1);
1013 }
1014 vq->last_used_idx++;
1015 return 0;
1016}
1017
1018/* This actually signals the guest, using eventfd. */
1019void vhost_signal(struct vhost_dev *dev, struct vhost_virtqueue *vq)
1020{
1021 __u16 flags = 0;
1022 if (get_user(flags, &vq->avail->flags)) {
1023 vq_err(vq, "Failed to get flags");
1024 return;
1025 }
1026
1027 /* If they don't want an interrupt, don't signal, unless empty. */
1028 if ((flags & VRING_AVAIL_F_NO_INTERRUPT) &&
1029 (vq->avail_idx != vq->last_avail_idx ||
1030 !vhost_has_feature(dev, VIRTIO_F_NOTIFY_ON_EMPTY)))
1031 return;
1032
1033	/* Signal the Guest, telling them we used something up. */
1034 if (vq->call_ctx)
1035 eventfd_signal(vq->call_ctx, 1);
1036}
1037
1038/* And here's the combo meal deal. Supersize me! */
1039void vhost_add_used_and_signal(struct vhost_dev *dev,
1040 struct vhost_virtqueue *vq,
1041 unsigned int head, int len)
1042{
1043 vhost_add_used(vq, head, len);
1044 vhost_signal(dev, vq);
1045}
1046
1047/* OK, now we need to know about added descriptors. */
1048bool vhost_enable_notify(struct vhost_virtqueue *vq)
1049{
1050 u16 avail_idx;
1051 int r;
1052 if (!(vq->used_flags & VRING_USED_F_NO_NOTIFY))
1053 return false;
1054 vq->used_flags &= ~VRING_USED_F_NO_NOTIFY;
1055 r = put_user(vq->used_flags, &vq->used->flags);
1056 if (r) {
1057 vq_err(vq, "Failed to enable notification at %p: %d\n",
1058 &vq->used->flags, r);
1059 return false;
1060 }
1061 /* They could have slipped one in as we were doing that: make
1062 * sure it's written, then check again. */
1063 mb();
1064 r = get_user(avail_idx, &vq->avail->idx);
1065 if (r) {
1066 vq_err(vq, "Failed to check avail idx at %p: %d\n",
1067 &vq->avail->idx, r);
1068 return false;
1069 }
1070
1071 return avail_idx != vq->last_avail_idx;
1072}
1073
1074/* We don't need to be notified again. */
1075void vhost_disable_notify(struct vhost_virtqueue *vq)
1076{
1077 int r;
1078 if (vq->used_flags & VRING_USED_F_NO_NOTIFY)
1079 return;
1080 vq->used_flags |= VRING_USED_F_NO_NOTIFY;
1081 r = put_user(vq->used_flags, &vq->used->flags);
1082 if (r)
1083 vq_err(vq, "Failed to enable notification at %p: %d\n",
1084 &vq->used->flags, r);
1085}
1086
1087int vhost_init(void)
1088{
1089 vhost_workqueue = create_singlethread_workqueue("vhost");
1090 if (!vhost_workqueue)
1091 return -ENOMEM;
1092 return 0;
1093}
1094
1095void vhost_cleanup(void)
1096{
1097 destroy_workqueue(vhost_workqueue);
1098}
diff --git a/drivers/vhost/vhost.h b/drivers/vhost/vhost.h
new file mode 100644
index 00000000000..44591ba9b07
--- /dev/null
+++ b/drivers/vhost/vhost.h
@@ -0,0 +1,161 @@
1#ifndef _VHOST_H
2#define _VHOST_H
3
4#include <linux/eventfd.h>
5#include <linux/vhost.h>
6#include <linux/mm.h>
7#include <linux/mutex.h>
8#include <linux/workqueue.h>
9#include <linux/poll.h>
10#include <linux/file.h>
11#include <linux/skbuff.h>
12#include <linux/uio.h>
13#include <linux/virtio_config.h>
14#include <linux/virtio_ring.h>
15
16struct vhost_device;
17
18enum {
19	/* Enough room for all fragments, the head, and the virtio net header. */
20 VHOST_NET_MAX_SG = MAX_SKB_FRAGS + 2,
21};
22
23/* Poll a file (eventfd or socket) */
24/* Note: there's nothing vhost specific about this structure. */
25struct vhost_poll {
26 poll_table table;
27 wait_queue_head_t *wqh;
28 wait_queue_t wait;
29 /* struct which will handle all actual work. */
30 struct work_struct work;
31 unsigned long mask;
32};
33
34void vhost_poll_init(struct vhost_poll *poll, work_func_t func,
35 unsigned long mask);
36void vhost_poll_start(struct vhost_poll *poll, struct file *file);
37void vhost_poll_stop(struct vhost_poll *poll);
38void vhost_poll_flush(struct vhost_poll *poll);
39void vhost_poll_queue(struct vhost_poll *poll);
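/*
 * Illustrative only: a backend would typically call
 *	vhost_poll_init(&poll, handle_kick, POLLIN);
 *	vhost_poll_start(&poll, eventfd_file);
 * and later vhost_poll_stop() followed by vhost_poll_flush() before
 * dropping its reference to the file.
 */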
40
41struct vhost_log {
42 u64 addr;
43 u64 len;
44};
45
46/* The virtqueue structure describes a queue attached to a device. */
47struct vhost_virtqueue {
48 struct vhost_dev *dev;
49
50 /* The actual ring of buffers. */
51 struct mutex mutex;
52 unsigned int num;
53 struct vring_desc __user *desc;
54 struct vring_avail __user *avail;
55 struct vring_used __user *used;
56 struct file *kick;
57 struct file *call;
58 struct file *error;
59 struct eventfd_ctx *call_ctx;
60 struct eventfd_ctx *error_ctx;
61 struct eventfd_ctx *log_ctx;
62
63 struct vhost_poll poll;
64
65	/* The routine to call when the Guest pings us, or on timeout. */
66 work_func_t handle_kick;
67
68 /* Last available index we saw. */
69 u16 last_avail_idx;
70
71 /* Caches available index value from user. */
72 u16 avail_idx;
73
74 /* Last index we used. */
75 u16 last_used_idx;
76
77 /* Used flags */
78 u16 used_flags;
79
80 /* Log writes to used structure. */
81 bool log_used;
82 u64 log_addr;
83
84 struct iovec indirect[VHOST_NET_MAX_SG];
85 struct iovec iov[VHOST_NET_MAX_SG];
86 struct iovec hdr[VHOST_NET_MAX_SG];
87 size_t hdr_size;
88 /* We use a kind of RCU to access private pointer.
89 * All readers access it from workqueue, which makes it possible to
90 * flush the workqueue instead of synchronize_rcu. Therefore readers do
91 * not need to call rcu_read_lock/rcu_read_unlock: the beginning of
92 * work item execution acts instead of rcu_read_lock() and the end of
93	 * work item execution acts instead of rcu_read_unlock().
94 * Writers use virtqueue mutex. */
95 void *private_data;
96 /* Log write descriptors */
97 void __user *log_base;
98 struct vhost_log log[VHOST_NET_MAX_SG];
99};
100
101struct vhost_dev {
102 /* Readers use RCU to access memory table pointer
103 * log base pointer and features.
104 * Writers use mutex below.*/
105 struct vhost_memory *memory;
106 struct mm_struct *mm;
107 struct mutex mutex;
108 unsigned acked_features;
109 struct vhost_virtqueue *vqs;
110 int nvqs;
111 struct file *log_file;
112 struct eventfd_ctx *log_ctx;
113};
114
115long vhost_dev_init(struct vhost_dev *, struct vhost_virtqueue *vqs, int nvqs);
116long vhost_dev_check_owner(struct vhost_dev *);
117long vhost_dev_reset_owner(struct vhost_dev *);
118void vhost_dev_cleanup(struct vhost_dev *);
119long vhost_dev_ioctl(struct vhost_dev *, unsigned int ioctl, unsigned long arg);
120int vhost_vq_access_ok(struct vhost_virtqueue *vq);
121int vhost_log_access_ok(struct vhost_dev *);
122
123unsigned vhost_get_vq_desc(struct vhost_dev *, struct vhost_virtqueue *,
124 struct iovec iov[], unsigned int iov_count,
125 unsigned int *out_num, unsigned int *in_num,
126 struct vhost_log *log, unsigned int *log_num);
127void vhost_discard_vq_desc(struct vhost_virtqueue *);
128
129int vhost_add_used(struct vhost_virtqueue *, unsigned int head, int len);
130void vhost_signal(struct vhost_dev *, struct vhost_virtqueue *);
131void vhost_add_used_and_signal(struct vhost_dev *, struct vhost_virtqueue *,
132 unsigned int head, int len);
133void vhost_disable_notify(struct vhost_virtqueue *);
134bool vhost_enable_notify(struct vhost_virtqueue *);
135
136int vhost_log_write(struct vhost_virtqueue *vq, struct vhost_log *log,
137 unsigned int log_num, u64 len);
138
139int vhost_init(void);
140void vhost_cleanup(void);
141
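/* Report a virtqueue error: log the message and, if an error eventfd is set,
 * signal it so userspace notices. */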
142#define vq_err(vq, fmt, ...) do { \
143 pr_debug(pr_fmt(fmt), ##__VA_ARGS__); \
144 if ((vq)->error_ctx) \
145 eventfd_signal((vq)->error_ctx, 1);\
146 } while (0)
147
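/* Feature bits the vhost core is prepared to accept in acked_features. */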
148enum {
149 VHOST_FEATURES = (1 << VIRTIO_F_NOTIFY_ON_EMPTY) |
150 (1 << VIRTIO_RING_F_INDIRECT_DESC) |
151 (1 << VHOST_F_LOG_ALL) |
152 (1 << VHOST_NET_F_VIRTIO_NET_HDR),
153};
154
155static inline int vhost_has_feature(struct vhost_dev *dev, int bit)
156{
157 unsigned acked_features = rcu_dereference(dev->acked_features);
158 return acked_features & (1 << bit);
159}
160
161#endif
diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c
index fbd2ecde93e..71929ee00d6 100644
--- a/drivers/virtio/virtio_ring.c
+++ b/drivers/virtio/virtio_ring.c
@@ -334,6 +334,30 @@ static bool vring_enable_cb(struct virtqueue *_vq)
334 return true; 334 return true;
335} 335}
336 336
337static void *vring_detach_unused_buf(struct virtqueue *_vq)
338{
339 struct vring_virtqueue *vq = to_vvq(_vq);
340 unsigned int i;
341 void *buf;
342
343 START_USE(vq);
344
345 for (i = 0; i < vq->vring.num; i++) {
346 if (!vq->data[i])
347 continue;
348 /* detach_buf clears data, so grab it now. */
349 buf = vq->data[i];
350 detach_buf(vq, i);
351 END_USE(vq);
352 return buf;
353 }
354 /* That should have freed everything. */
355 BUG_ON(vq->num_free != vq->vring.num);
356
357 END_USE(vq);
358 return NULL;
359}
360
337irqreturn_t vring_interrupt(int irq, void *_vq) 361irqreturn_t vring_interrupt(int irq, void *_vq)
338{ 362{
339 struct vring_virtqueue *vq = to_vvq(_vq); 363 struct vring_virtqueue *vq = to_vvq(_vq);
@@ -360,6 +384,7 @@ static struct virtqueue_ops vring_vq_ops = {
360 .kick = vring_kick, 384 .kick = vring_kick,
361 .disable_cb = vring_disable_cb, 385 .disable_cb = vring_disable_cb,
362 .enable_cb = vring_enable_cb, 386 .enable_cb = vring_enable_cb,
387 .detach_unused_buf = vring_detach_unused_buf,
363}; 388};
364 389
365struct virtqueue *vring_new_virtqueue(unsigned int num, 390struct virtqueue *vring_new_virtqueue(unsigned int num,
diff --git a/include/linux/Kbuild b/include/linux/Kbuild
index 756f831cbdd..d93080748a9 100644
--- a/include/linux/Kbuild
+++ b/include/linux/Kbuild
@@ -362,6 +362,7 @@ unifdef-y += uio.h
362unifdef-y += unistd.h 362unifdef-y += unistd.h
363unifdef-y += usbdevice_fs.h 363unifdef-y += usbdevice_fs.h
364unifdef-y += utsname.h 364unifdef-y += utsname.h
365unifdef-y += vhost.h
365unifdef-y += videodev2.h 366unifdef-y += videodev2.h
366unifdef-y += videodev.h 367unifdef-y += videodev.h
367unifdef-y += virtio_config.h 368unifdef-y += virtio_config.h
diff --git a/include/linux/can/dev.h b/include/linux/can/dev.h
index 3db7767d2a1..c8c660a79f9 100644
--- a/include/linux/can/dev.h
+++ b/include/linux/can/dev.h
@@ -38,6 +38,7 @@ struct can_priv {
38 38
39 enum can_state state; 39 enum can_state state;
40 u32 ctrlmode; 40 u32 ctrlmode;
41 u32 ctrlmode_supported;
41 42
42 int restart_ms; 43 int restart_ms;
43 struct timer_list restart_timer; 44 struct timer_list restart_timer;
@@ -60,6 +61,21 @@ struct can_priv {
60 */ 61 */
61#define get_can_dlc(i) (min_t(__u8, (i), 8)) 62#define get_can_dlc(i) (min_t(__u8, (i), 8))
62 63
64/* Drop a given socketbuffer if it does not contain a valid CAN frame. */
65static inline int can_dropped_invalid_skb(struct net_device *dev,
66 struct sk_buff *skb)
67{
68 const struct can_frame *cf = (struct can_frame *)skb->data;
69
70 if (unlikely(skb->len != sizeof(*cf) || cf->can_dlc > 8)) {
71 kfree_skb(skb);
72 dev->stats.tx_dropped++;
73 return 1;
74 }
75
76 return 0;
77}
78
63struct net_device *alloc_candev(int sizeof_priv, unsigned int echo_skb_max); 79struct net_device *alloc_candev(int sizeof_priv, unsigned int echo_skb_max);
64void free_candev(struct net_device *dev); 80void free_candev(struct net_device *dev);
65 81
diff --git a/include/linux/can/netlink.h b/include/linux/can/netlink.h
index 9ecbb7871c0..c818335fbb1 100644
--- a/include/linux/can/netlink.h
+++ b/include/linux/can/netlink.h
@@ -80,6 +80,7 @@ struct can_ctrlmode {
80#define CAN_CTRLMODE_LOOPBACK 0x1 /* Loopback mode */ 80#define CAN_CTRLMODE_LOOPBACK 0x1 /* Loopback mode */
81#define CAN_CTRLMODE_LISTENONLY 0x2 /* Listen-only mode */ 81#define CAN_CTRLMODE_LISTENONLY 0x2 /* Listen-only mode */
82#define CAN_CTRLMODE_3_SAMPLES 0x4 /* Triple sampling mode */ 82#define CAN_CTRLMODE_3_SAMPLES 0x4 /* Triple sampling mode */
83#define CAN_CTRLMODE_ONE_SHOT 0x8 /* One-Shot mode */
83 84
84/* 85/*
85 * CAN device statistics 86 * CAN device statistics
diff --git a/include/linux/ieee80211.h b/include/linux/ieee80211.h
index 163c840437d..842701906ae 100644
--- a/include/linux/ieee80211.h
+++ b/include/linux/ieee80211.h
@@ -120,6 +120,24 @@
120#define IEEE80211_QOS_CTL_TID_MASK 0x000F 120#define IEEE80211_QOS_CTL_TID_MASK 0x000F
121#define IEEE80211_QOS_CTL_TAG1D_MASK 0x0007 121#define IEEE80211_QOS_CTL_TAG1D_MASK 0x0007
122 122
123/* U-APSD queue for WMM IEs sent by AP */
124#define IEEE80211_WMM_IE_AP_QOSINFO_UAPSD (1<<7)
125
126/* U-APSD queues for WMM IEs sent by STA */
127#define IEEE80211_WMM_IE_STA_QOSINFO_AC_VO (1<<0)
128#define IEEE80211_WMM_IE_STA_QOSINFO_AC_VI (1<<1)
129#define IEEE80211_WMM_IE_STA_QOSINFO_AC_BK (1<<2)
130#define IEEE80211_WMM_IE_STA_QOSINFO_AC_BE (1<<3)
131#define IEEE80211_WMM_IE_STA_QOSINFO_AC_MASK 0x0f
132
133/* U-APSD max SP length for WMM IEs sent by STA */
134#define IEEE80211_WMM_IE_STA_QOSINFO_SP_ALL 0x00
135#define IEEE80211_WMM_IE_STA_QOSINFO_SP_2 0x01
136#define IEEE80211_WMM_IE_STA_QOSINFO_SP_4 0x02
137#define IEEE80211_WMM_IE_STA_QOSINFO_SP_6 0x03
138#define IEEE80211_WMM_IE_STA_QOSINFO_SP_MASK 0x03
139#define IEEE80211_WMM_IE_STA_QOSINFO_SP_SHIFT 5
140
123struct ieee80211_hdr { 141struct ieee80211_hdr {
124 __le16 frame_control; 142 __le16 frame_control;
125 __le16 duration_id; 143 __le16 duration_id;
@@ -130,6 +148,25 @@ struct ieee80211_hdr {
130 u8 addr4[6]; 148 u8 addr4[6];
131} __attribute__ ((packed)); 149} __attribute__ ((packed));
132 150
151struct ieee80211_hdr_3addr {
152 __le16 frame_control;
153 __le16 duration_id;
154 u8 addr1[6];
155 u8 addr2[6];
156 u8 addr3[6];
157 __le16 seq_ctrl;
158} __attribute__ ((packed));
159
160struct ieee80211_qos_hdr {
161 __le16 frame_control;
162 __le16 duration_id;
163 u8 addr1[6];
164 u8 addr2[6];
165 u8 addr3[6];
166 __le16 seq_ctrl;
167 __le16 qos_ctrl;
168} __attribute__ ((packed));
169
133/** 170/**
134 * ieee80211_has_tods - check if IEEE80211_FCTL_TODS is set 171 * ieee80211_has_tods - check if IEEE80211_FCTL_TODS is set
135 * @fc: frame control bytes in little-endian byteorder 172 * @fc: frame control bytes in little-endian byteorder
@@ -707,6 +744,10 @@ struct ieee80211_mgmt {
707 u8 action; 744 u8 action;
708 u8 trans_id[WLAN_SA_QUERY_TR_ID_LEN]; 745 u8 trans_id[WLAN_SA_QUERY_TR_ID_LEN];
709 } __attribute__ ((packed)) sa_query; 746 } __attribute__ ((packed)) sa_query;
747 struct {
748 u8 action;
749 u8 smps_control;
750 } __attribute__ ((packed)) ht_smps;
710 } u; 751 } u;
711 } __attribute__ ((packed)) action; 752 } __attribute__ ((packed)) action;
712 } u; 753 } u;
@@ -771,7 +812,10 @@ struct ieee80211_bar {
771/** 812/**
772 * struct ieee80211_mcs_info - MCS information 813 * struct ieee80211_mcs_info - MCS information
773 * @rx_mask: RX mask 814 * @rx_mask: RX mask
774 * @rx_highest: highest supported RX rate 815 * @rx_highest: highest supported RX rate. If set represents
816 * the highest supported RX data rate in units of 1 Mbps.
817 * If this field is 0 this value should not be used to
818 * consider the highest RX data rate supported.
775 * @tx_params: TX parameters 819 * @tx_params: TX parameters
776 */ 820 */
777struct ieee80211_mcs_info { 821struct ieee80211_mcs_info {
@@ -824,6 +868,7 @@ struct ieee80211_ht_cap {
824#define IEEE80211_HT_CAP_LDPC_CODING 0x0001 868#define IEEE80211_HT_CAP_LDPC_CODING 0x0001
825#define IEEE80211_HT_CAP_SUP_WIDTH_20_40 0x0002 869#define IEEE80211_HT_CAP_SUP_WIDTH_20_40 0x0002
826#define IEEE80211_HT_CAP_SM_PS 0x000C 870#define IEEE80211_HT_CAP_SM_PS 0x000C
871#define IEEE80211_HT_CAP_SM_PS_SHIFT 2
827#define IEEE80211_HT_CAP_GRN_FLD 0x0010 872#define IEEE80211_HT_CAP_GRN_FLD 0x0010
828#define IEEE80211_HT_CAP_SGI_20 0x0020 873#define IEEE80211_HT_CAP_SGI_20 0x0020
829#define IEEE80211_HT_CAP_SGI_40 0x0040 874#define IEEE80211_HT_CAP_SGI_40 0x0040
@@ -839,6 +884,7 @@ struct ieee80211_ht_cap {
839/* 802.11n HT capability AMPDU settings (for ampdu_params_info) */ 884/* 802.11n HT capability AMPDU settings (for ampdu_params_info) */
840#define IEEE80211_HT_AMPDU_PARM_FACTOR 0x03 885#define IEEE80211_HT_AMPDU_PARM_FACTOR 0x03
841#define IEEE80211_HT_AMPDU_PARM_DENSITY 0x1C 886#define IEEE80211_HT_AMPDU_PARM_DENSITY 0x1C
887#define IEEE80211_HT_AMPDU_PARM_DENSITY_SHIFT 2
842 888
843/* 889/*
844 * Maximum length of AMPDU that the STA can receive. 890 * Maximum length of AMPDU that the STA can receive.
@@ -922,12 +968,17 @@ struct ieee80211_ht_info {
922#define IEEE80211_MAX_AMPDU_BUF 0x40 968#define IEEE80211_MAX_AMPDU_BUF 0x40
923 969
924 970
925/* Spatial Multiplexing Power Save Modes */ 971/* Spatial Multiplexing Power Save Modes (for capability) */
926#define WLAN_HT_CAP_SM_PS_STATIC 0 972#define WLAN_HT_CAP_SM_PS_STATIC 0
927#define WLAN_HT_CAP_SM_PS_DYNAMIC 1 973#define WLAN_HT_CAP_SM_PS_DYNAMIC 1
928#define WLAN_HT_CAP_SM_PS_INVALID 2 974#define WLAN_HT_CAP_SM_PS_INVALID 2
929#define WLAN_HT_CAP_SM_PS_DISABLED 3 975#define WLAN_HT_CAP_SM_PS_DISABLED 3
930 976
977/* for SM power control field lower two bits */
978#define WLAN_HT_SMPS_CONTROL_DISABLED 0
979#define WLAN_HT_SMPS_CONTROL_STATIC 1
980#define WLAN_HT_SMPS_CONTROL_DYNAMIC 3
981
931/* Authentication algorithms */ 982/* Authentication algorithms */
932#define WLAN_AUTH_OPEN 0 983#define WLAN_AUTH_OPEN 0
933#define WLAN_AUTH_SHARED_KEY 1 984#define WLAN_AUTH_SHARED_KEY 1
@@ -1071,12 +1122,12 @@ enum ieee80211_eid {
1071 WLAN_EID_TIM = 5, 1122 WLAN_EID_TIM = 5,
1072 WLAN_EID_IBSS_PARAMS = 6, 1123 WLAN_EID_IBSS_PARAMS = 6,
1073 WLAN_EID_CHALLENGE = 16, 1124 WLAN_EID_CHALLENGE = 16,
1074 /* 802.11d */ 1125
1075 WLAN_EID_COUNTRY = 7, 1126 WLAN_EID_COUNTRY = 7,
1076 WLAN_EID_HP_PARAMS = 8, 1127 WLAN_EID_HP_PARAMS = 8,
1077 WLAN_EID_HP_TABLE = 9, 1128 WLAN_EID_HP_TABLE = 9,
1078 WLAN_EID_REQUEST = 10, 1129 WLAN_EID_REQUEST = 10,
1079 /* 802.11e */ 1130
1080 WLAN_EID_QBSS_LOAD = 11, 1131 WLAN_EID_QBSS_LOAD = 11,
1081 WLAN_EID_EDCA_PARAM_SET = 12, 1132 WLAN_EID_EDCA_PARAM_SET = 12,
1082 WLAN_EID_TSPEC = 13, 1133 WLAN_EID_TSPEC = 13,
@@ -1099,7 +1150,7 @@ enum ieee80211_eid {
1099 WLAN_EID_PREP = 69, 1150 WLAN_EID_PREP = 69,
1100 WLAN_EID_PERR = 70, 1151 WLAN_EID_PERR = 70,
1101 WLAN_EID_RANN = 49, /* compatible with FreeBSD */ 1152 WLAN_EID_RANN = 49, /* compatible with FreeBSD */
1102 /* 802.11h */ 1153
1103 WLAN_EID_PWR_CONSTRAINT = 32, 1154 WLAN_EID_PWR_CONSTRAINT = 32,
1104 WLAN_EID_PWR_CAPABILITY = 33, 1155 WLAN_EID_PWR_CAPABILITY = 33,
1105 WLAN_EID_TPC_REQUEST = 34, 1156 WLAN_EID_TPC_REQUEST = 34,
@@ -1110,20 +1161,41 @@ enum ieee80211_eid {
1110 WLAN_EID_MEASURE_REPORT = 39, 1161 WLAN_EID_MEASURE_REPORT = 39,
1111 WLAN_EID_QUIET = 40, 1162 WLAN_EID_QUIET = 40,
1112 WLAN_EID_IBSS_DFS = 41, 1163 WLAN_EID_IBSS_DFS = 41,
1113 /* 802.11g */ 1164
1114 WLAN_EID_ERP_INFO = 42, 1165 WLAN_EID_ERP_INFO = 42,
1115 WLAN_EID_EXT_SUPP_RATES = 50, 1166 WLAN_EID_EXT_SUPP_RATES = 50,
1116 /* 802.11n */ 1167
1117 WLAN_EID_HT_CAPABILITY = 45, 1168 WLAN_EID_HT_CAPABILITY = 45,
1118 WLAN_EID_HT_INFORMATION = 61, 1169 WLAN_EID_HT_INFORMATION = 61,
1119 /* 802.11i */ 1170
1120 WLAN_EID_RSN = 48, 1171 WLAN_EID_RSN = 48,
1121 WLAN_EID_TIMEOUT_INTERVAL = 56, 1172 WLAN_EID_MMIE = 76,
1122 WLAN_EID_MMIE = 76 /* 802.11w */,
1123 WLAN_EID_WPA = 221, 1173 WLAN_EID_WPA = 221,
1124 WLAN_EID_GENERIC = 221, 1174 WLAN_EID_GENERIC = 221,
1125 WLAN_EID_VENDOR_SPECIFIC = 221, 1175 WLAN_EID_VENDOR_SPECIFIC = 221,
1126 WLAN_EID_QOS_PARAMETER = 222 1176 WLAN_EID_QOS_PARAMETER = 222,
1177
1178 WLAN_EID_AP_CHAN_REPORT = 51,
1179 WLAN_EID_NEIGHBOR_REPORT = 52,
1180 WLAN_EID_RCPI = 53,
1181 WLAN_EID_BSS_AVG_ACCESS_DELAY = 63,
1182 WLAN_EID_ANTENNA_INFO = 64,
1183 WLAN_EID_RSNI = 65,
1184 WLAN_EID_MEASUREMENT_PILOT_TX_INFO = 66,
1185 WLAN_EID_BSS_AVAILABLE_CAPACITY = 67,
1186 WLAN_EID_BSS_AC_ACCESS_DELAY = 68,
1187 WLAN_EID_RRM_ENABLED_CAPABILITIES = 70,
1188 WLAN_EID_MULTIPLE_BSSID = 71,
1189
1190 WLAN_EID_MOBILITY_DOMAIN = 54,
1191 WLAN_EID_FAST_BSS_TRANSITION = 55,
1192 WLAN_EID_TIMEOUT_INTERVAL = 56,
1193 WLAN_EID_RIC_DATA = 57,
1194 WLAN_EID_RIC_DESCRIPTOR = 75,
1195
1196 WLAN_EID_DSE_REGISTERED_LOCATION = 58,
1197 WLAN_EID_SUPPORTED_REGULATORY_CLASSES = 59,
1198 WLAN_EID_EXT_CHANSWITCH_ANN = 60,
1127}; 1199};
1128 1200
1129/* Action category code */ 1201/* Action category code */
@@ -1150,6 +1222,18 @@ enum ieee80211_spectrum_mgmt_actioncode {
1150 WLAN_ACTION_SPCT_CHL_SWITCH = 4, 1222 WLAN_ACTION_SPCT_CHL_SWITCH = 4,
1151}; 1223};
1152 1224
1225/* HT action codes */
1226enum ieee80211_ht_actioncode {
1227 WLAN_HT_ACTION_NOTIFY_CHANWIDTH = 0,
1228 WLAN_HT_ACTION_SMPS = 1,
1229 WLAN_HT_ACTION_PSMP = 2,
1230 WLAN_HT_ACTION_PCO_PHASE = 3,
1231 WLAN_HT_ACTION_CSI = 4,
1232 WLAN_HT_ACTION_NONCOMPRESSED_BF = 5,
1233 WLAN_HT_ACTION_COMPRESSED_BF = 6,
1234 WLAN_HT_ACTION_ASEL_IDX_FEEDBACK = 7,
1235};
1236
1153/* Security key length */ 1237/* Security key length */
1154enum ieee80211_key_len { 1238enum ieee80211_key_len {
1155 WLAN_KEY_LEN_WEP40 = 5, 1239 WLAN_KEY_LEN_WEP40 = 5,
diff --git a/include/linux/if_tun.h b/include/linux/if_tun.h
index 3f5fd523b49..404abe00162 100644
--- a/include/linux/if_tun.h
+++ b/include/linux/if_tun.h
@@ -86,4 +86,18 @@ struct tun_filter {
86 __u8 addr[0][ETH_ALEN]; 86 __u8 addr[0][ETH_ALEN];
87}; 87};
88 88
89#ifdef __KERNEL__
90#if defined(CONFIG_TUN) || defined(CONFIG_TUN_MODULE)
91struct socket *tun_get_socket(struct file *);
92#else
93#include <linux/err.h>
94#include <linux/errno.h>
95struct file;
96struct socket;
97static inline struct socket *tun_get_socket(struct file *f)
98{
99 return ERR_PTR(-EINVAL);
100}
101#endif /* CONFIG_TUN */
102#endif /* __KERNEL__ */
89#endif /* __IF_TUN_H */ 103#endif /* __IF_TUN_H */
diff --git a/include/linux/igmp.h b/include/linux/igmp.h
index 724c27e5d17..93fc2449af1 100644
--- a/include/linux/igmp.h
+++ b/include/linux/igmp.h
@@ -153,6 +153,7 @@ extern int sysctl_igmp_max_msf;
153struct ip_sf_socklist { 153struct ip_sf_socklist {
154 unsigned int sl_max; 154 unsigned int sl_max;
155 unsigned int sl_count; 155 unsigned int sl_count;
156 struct rcu_head rcu;
156 __be32 sl_addr[0]; 157 __be32 sl_addr[0];
157}; 158};
158 159
@@ -170,6 +171,7 @@ struct ip_mc_socklist {
170 struct ip_mreqn multi; 171 struct ip_mreqn multi;
171 unsigned int sfmode; /* MCAST_{INCLUDE,EXCLUDE} */ 172 unsigned int sfmode; /* MCAST_{INCLUDE,EXCLUDE} */
172 struct ip_sf_socklist *sflist; 173 struct ip_sf_socklist *sflist;
174 struct rcu_head rcu;
173}; 175};
174 176
175struct ip_sf_list { 177struct ip_sf_list {
diff --git a/include/linux/in.h b/include/linux/in.h
index b615649db12..583c76f9c30 100644
--- a/include/linux/in.h
+++ b/include/linux/in.h
@@ -84,6 +84,8 @@ struct in_addr {
84#define IP_ORIGDSTADDR 20 84#define IP_ORIGDSTADDR 20
85#define IP_RECVORIGDSTADDR IP_ORIGDSTADDR 85#define IP_RECVORIGDSTADDR IP_ORIGDSTADDR
86 86
87#define IP_MINTTL 21
88
87/* IP_MTU_DISCOVER values */ 89/* IP_MTU_DISCOVER values */
88#define IP_PMTUDISC_DONT 0 /* Never send DF frames */ 90#define IP_PMTUDISC_DONT 0 /* Never send DF frames */
89#define IP_PMTUDISC_WANT 1 /* Use per route hints */ 91#define IP_PMTUDISC_WANT 1 /* Use per route hints */
diff --git a/include/linux/inetdevice.h b/include/linux/inetdevice.h
index b2304929434..cf257809771 100644
--- a/include/linux/inetdevice.h
+++ b/include/linux/inetdevice.h
@@ -89,6 +89,7 @@ static inline void ipv4_devconf_setall(struct in_device *in_dev)
89 89
90#define IN_DEV_LOG_MARTIANS(in_dev) IN_DEV_ORCONF((in_dev), LOG_MARTIANS) 90#define IN_DEV_LOG_MARTIANS(in_dev) IN_DEV_ORCONF((in_dev), LOG_MARTIANS)
91#define IN_DEV_PROXY_ARP(in_dev) IN_DEV_ORCONF((in_dev), PROXY_ARP) 91#define IN_DEV_PROXY_ARP(in_dev) IN_DEV_ORCONF((in_dev), PROXY_ARP)
92#define IN_DEV_PROXY_ARP_PVLAN(in_dev) IN_DEV_CONF_GET(in_dev, PROXY_ARP_PVLAN)
92#define IN_DEV_SHARED_MEDIA(in_dev) IN_DEV_ORCONF((in_dev), SHARED_MEDIA) 93#define IN_DEV_SHARED_MEDIA(in_dev) IN_DEV_ORCONF((in_dev), SHARED_MEDIA)
93#define IN_DEV_TX_REDIRECTS(in_dev) IN_DEV_ORCONF((in_dev), SEND_REDIRECTS) 94#define IN_DEV_TX_REDIRECTS(in_dev) IN_DEV_ORCONF((in_dev), SEND_REDIRECTS)
94#define IN_DEV_SEC_REDIRECTS(in_dev) IN_DEV_ORCONF((in_dev), \ 95#define IN_DEV_SEC_REDIRECTS(in_dev) IN_DEV_ORCONF((in_dev), \
diff --git a/include/linux/isdn/capilli.h b/include/linux/isdn/capilli.h
index 7acb87a4487..d3e5e9da0c8 100644
--- a/include/linux/isdn/capilli.h
+++ b/include/linux/isdn/capilli.h
@@ -50,8 +50,7 @@ struct capi_ctr {
50 u16 (*send_message)(struct capi_ctr *, struct sk_buff *skb); 50 u16 (*send_message)(struct capi_ctr *, struct sk_buff *skb);
51 51
52 char *(*procinfo)(struct capi_ctr *); 52 char *(*procinfo)(struct capi_ctr *);
53 int (*ctr_read_proc)(char *page, char **start, off_t off, 53 const struct file_operations *proc_fops;
54 int count, int *eof, struct capi_ctr *card);
55 54
56 /* filled in before calling ready callback */ 55 /* filled in before calling ready callback */
57 u8 manu[CAPI_MANUFACTURER_LEN]; /* CAPI_GET_MANUFACTURER */ 56 u8 manu[CAPI_MANUFACTURER_LEN]; /* CAPI_GET_MANUFACTURER */
diff --git a/include/linux/llc.h b/include/linux/llc.h
index 7733585603f..ad7074ba81a 100644
--- a/include/linux/llc.h
+++ b/include/linux/llc.h
@@ -36,6 +36,7 @@ enum llc_sockopts {
36 LLC_OPT_BUSY_TMR_EXP, /* busy state expire time (secs). */ 36 LLC_OPT_BUSY_TMR_EXP, /* busy state expire time (secs). */
37 LLC_OPT_TX_WIN, /* tx window size. */ 37 LLC_OPT_TX_WIN, /* tx window size. */
38 LLC_OPT_RX_WIN, /* rx window size. */ 38 LLC_OPT_RX_WIN, /* rx window size. */
39 LLC_OPT_PKTINFO, /* ancillary packet information. */
39 LLC_OPT_MAX 40 LLC_OPT_MAX
40}; 41};
41 42
@@ -70,6 +71,12 @@ enum llc_sockopts {
70#define LLC_SAP_RM 0xD4 /* Resource Management */ 71#define LLC_SAP_RM 0xD4 /* Resource Management */
71#define LLC_SAP_GLOBAL 0xFF /* Global SAP. */ 72#define LLC_SAP_GLOBAL 0xFF /* Global SAP. */
72 73
74struct llc_pktinfo {
75 int lpi_ifindex;
76 unsigned char lpi_sap;
77 unsigned char lpi_mac[IFHWADDRLEN];
78};
79
73#ifdef __KERNEL__ 80#ifdef __KERNEL__
74#define LLC_SAP_DYN_START 0xC0 81#define LLC_SAP_DYN_START 0xC0
75#define LLC_SAP_DYN_STOP 0xDE 82#define LLC_SAP_DYN_STOP 0xDE
diff --git a/include/linux/miscdevice.h b/include/linux/miscdevice.h
index adaf3c15e44..8b5f7cc0fba 100644
--- a/include/linux/miscdevice.h
+++ b/include/linux/miscdevice.h
@@ -30,6 +30,7 @@
30#define HPET_MINOR 228 30#define HPET_MINOR 228
31#define FUSE_MINOR 229 31#define FUSE_MINOR 229
32#define KVM_MINOR 232 32#define KVM_MINOR 232
33#define VHOST_NET_MINOR 233
33#define MISC_DYNAMIC_MINOR 255 34#define MISC_DYNAMIC_MINOR 255
34 35
35struct device; 36struct device;
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index a3fccc85b1a..93a32a5ca74 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -263,6 +263,11 @@ struct netdev_hw_addr_list {
263 int count; 263 int count;
264}; 264};
265 265
266#define netdev_uc_count(dev) ((dev)->uc.count)
267#define netdev_uc_empty(dev) ((dev)->uc.count == 0)
268#define netdev_for_each_uc_addr(ha, dev) \
269 list_for_each_entry(ha, &dev->uc.list, list)
270
266struct hh_cache { 271struct hh_cache {
267 struct hh_cache *hh_next; /* Next entry */ 272 struct hh_cache *hh_next; /* Next entry */
268 atomic_t hh_refcnt; /* number of users */ 273 atomic_t hh_refcnt; /* number of users */
@@ -621,30 +626,21 @@ struct net_device_ops {
621 struct net_device *dev); 626 struct net_device *dev);
622 u16 (*ndo_select_queue)(struct net_device *dev, 627 u16 (*ndo_select_queue)(struct net_device *dev,
623 struct sk_buff *skb); 628 struct sk_buff *skb);
624#define HAVE_CHANGE_RX_FLAGS
625 void (*ndo_change_rx_flags)(struct net_device *dev, 629 void (*ndo_change_rx_flags)(struct net_device *dev,
626 int flags); 630 int flags);
627#define HAVE_SET_RX_MODE
628 void (*ndo_set_rx_mode)(struct net_device *dev); 631 void (*ndo_set_rx_mode)(struct net_device *dev);
629#define HAVE_MULTICAST
630 void (*ndo_set_multicast_list)(struct net_device *dev); 632 void (*ndo_set_multicast_list)(struct net_device *dev);
631#define HAVE_SET_MAC_ADDR
632 int (*ndo_set_mac_address)(struct net_device *dev, 633 int (*ndo_set_mac_address)(struct net_device *dev,
633 void *addr); 634 void *addr);
634#define HAVE_VALIDATE_ADDR
635 int (*ndo_validate_addr)(struct net_device *dev); 635 int (*ndo_validate_addr)(struct net_device *dev);
636#define HAVE_PRIVATE_IOCTL
637 int (*ndo_do_ioctl)(struct net_device *dev, 636 int (*ndo_do_ioctl)(struct net_device *dev,
638 struct ifreq *ifr, int cmd); 637 struct ifreq *ifr, int cmd);
639#define HAVE_SET_CONFIG
640 int (*ndo_set_config)(struct net_device *dev, 638 int (*ndo_set_config)(struct net_device *dev,
641 struct ifmap *map); 639 struct ifmap *map);
642#define HAVE_CHANGE_MTU
643 int (*ndo_change_mtu)(struct net_device *dev, 640 int (*ndo_change_mtu)(struct net_device *dev,
644 int new_mtu); 641 int new_mtu);
645 int (*ndo_neigh_setup)(struct net_device *dev, 642 int (*ndo_neigh_setup)(struct net_device *dev,
646 struct neigh_parms *); 643 struct neigh_parms *);
647#define HAVE_TX_TIMEOUT
648 void (*ndo_tx_timeout) (struct net_device *dev); 644 void (*ndo_tx_timeout) (struct net_device *dev);
649 645
650 struct net_device_stats* (*ndo_get_stats)(struct net_device *dev); 646 struct net_device_stats* (*ndo_get_stats)(struct net_device *dev);
@@ -656,7 +652,6 @@ struct net_device_ops {
656 void (*ndo_vlan_rx_kill_vid)(struct net_device *dev, 652 void (*ndo_vlan_rx_kill_vid)(struct net_device *dev,
657 unsigned short vid); 653 unsigned short vid);
658#ifdef CONFIG_NET_POLL_CONTROLLER 654#ifdef CONFIG_NET_POLL_CONTROLLER
659#define HAVE_NETDEV_POLL
660 void (*ndo_poll_controller)(struct net_device *dev); 655 void (*ndo_poll_controller)(struct net_device *dev);
661#endif 656#endif
662#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE) 657#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
@@ -1527,7 +1522,6 @@ extern int netif_rx(struct sk_buff *skb);
1527extern int netif_rx_ni(struct sk_buff *skb); 1522extern int netif_rx_ni(struct sk_buff *skb);
1528#define HAVE_NETIF_RECEIVE_SKB 1 1523#define HAVE_NETIF_RECEIVE_SKB 1
1529extern int netif_receive_skb(struct sk_buff *skb); 1524extern int netif_receive_skb(struct sk_buff *skb);
1530extern void napi_gro_flush(struct napi_struct *napi);
1531extern gro_result_t dev_gro_receive(struct napi_struct *napi, 1525extern gro_result_t dev_gro_receive(struct napi_struct *napi,
1532 struct sk_buff *skb); 1526 struct sk_buff *skb);
1533extern gro_result_t napi_skb_finish(gro_result_t ret, struct sk_buff *skb); 1527extern gro_result_t napi_skb_finish(gro_result_t ret, struct sk_buff *skb);
diff --git a/include/linux/netpoll.h b/include/linux/netpoll.h
index 2524267210d..a765ea89854 100644
--- a/include/linux/netpoll.h
+++ b/include/linux/netpoll.h
@@ -21,15 +21,20 @@ struct netpoll {
21 __be32 local_ip, remote_ip; 21 __be32 local_ip, remote_ip;
22 u16 local_port, remote_port; 22 u16 local_port, remote_port;
23 u8 remote_mac[ETH_ALEN]; 23 u8 remote_mac[ETH_ALEN];
24
25 struct list_head rx; /* rx_np list element */
24}; 26};
25 27
26struct netpoll_info { 28struct netpoll_info {
27 atomic_t refcnt; 29 atomic_t refcnt;
30
28 int rx_flags; 31 int rx_flags;
29 spinlock_t rx_lock; 32 spinlock_t rx_lock;
30 struct netpoll *rx_np; /* netpoll that registered an rx_hook */ 33 struct list_head rx_np; /* netpolls that registered an rx_hook */
34
31 struct sk_buff_head arp_tx; /* list of arp requests to reply to */ 35 struct sk_buff_head arp_tx; /* list of arp requests to reply to */
32 struct sk_buff_head txq; 36 struct sk_buff_head txq;
37
33 struct delayed_work tx_work; 38 struct delayed_work tx_work;
34}; 39};
35 40
@@ -51,7 +56,7 @@ static inline int netpoll_rx(struct sk_buff *skb)
51 unsigned long flags; 56 unsigned long flags;
52 int ret = 0; 57 int ret = 0;
53 58
54 if (!npinfo || (!npinfo->rx_np && !npinfo->rx_flags)) 59 if (!npinfo || (list_empty(&npinfo->rx_np) && !npinfo->rx_flags))
55 return 0; 60 return 0;
56 61
57 spin_lock_irqsave(&npinfo->rx_lock, flags); 62 spin_lock_irqsave(&npinfo->rx_lock, flags);
@@ -67,7 +72,7 @@ static inline int netpoll_rx_on(struct sk_buff *skb)
67{ 72{
68 struct netpoll_info *npinfo = skb->dev->npinfo; 73 struct netpoll_info *npinfo = skb->dev->npinfo;
69 74
70 return npinfo && (npinfo->rx_np || npinfo->rx_flags); 75 return npinfo && (!list_empty(&npinfo->rx_np) || npinfo->rx_flags);
71} 76}
72 77
73static inline int netpoll_receive_skb(struct sk_buff *skb) 78static inline int netpoll_receive_skb(struct sk_buff *skb)
diff --git a/include/linux/nl80211.h b/include/linux/nl80211.h
index da8ea2e1927..127a7301576 100644
--- a/include/linux/nl80211.h
+++ b/include/linux/nl80211.h
@@ -270,6 +270,35 @@
270 * @NL80211_CMD_SET_WIPHY_NETNS: Set a wiphy's netns. Note that all devices 270 * @NL80211_CMD_SET_WIPHY_NETNS: Set a wiphy's netns. Note that all devices
271 * associated with this wiphy must be down and will follow. 271 * associated with this wiphy must be down and will follow.
272 * 272 *
273 * @NL80211_CMD_REMAIN_ON_CHANNEL: Request to remain awake on the specified
274 * channel for the specified amount of time. This can be used to do
275 * off-channel operations like transmit a Public Action frame and wait for
276 * a response while being associated to an AP on another channel.
277 * %NL80211_ATTR_WIPHY or %NL80211_ATTR_IFINDEX is used to specify which
278 * radio is used. %NL80211_ATTR_WIPHY_FREQ is used to specify the
279 * frequency for the operation and %NL80211_ATTR_WIPHY_CHANNEL_TYPE may be
280 * optionally used to specify additional channel parameters.
281 * %NL80211_ATTR_DURATION is used to specify the duration in milliseconds
282 * to remain on the channel. This command is also used as an event to
283 * notify when the requested duration starts (it may take a while for the
284 * driver to schedule this time due to other concurrent needs for the
285 * radio).
286 * When called, this operation returns a cookie (%NL80211_ATTR_COOKIE)
287 * that will be included with any events pertaining to this request;
288 * the cookie is also used to cancel the request.
289 * @NL80211_CMD_CANCEL_REMAIN_ON_CHANNEL: This command can be used to cancel a
290 * pending remain-on-channel duration if the desired operation has been
291 * completed prior to expiration of the originally requested duration.
292 * %NL80211_ATTR_WIPHY or %NL80211_ATTR_IFINDEX is used to specify the
293 * radio. The %NL80211_ATTR_COOKIE attribute must be given as well to
294 * uniquely identify the request.
295 * This command is also used as an event to notify when a requested
296 * remain-on-channel duration has expired.
297 *
298 * @NL80211_CMD_SET_TX_BITRATE_MASK: Set the mask of rates to be used in TX
299 * rate selection. %NL80211_ATTR_IFINDEX is used to specify the interface
300 * and @NL80211_ATTR_TX_RATES the set of allowed rates.
301 *
273 * @NL80211_CMD_MAX: highest used command number 302 * @NL80211_CMD_MAX: highest used command number
274 * @__NL80211_CMD_AFTER_LAST: internal use 303 * @__NL80211_CMD_AFTER_LAST: internal use
275 */ 304 */
@@ -353,6 +382,11 @@ enum nl80211_commands {
353 NL80211_CMD_DEL_PMKSA, 382 NL80211_CMD_DEL_PMKSA,
354 NL80211_CMD_FLUSH_PMKSA, 383 NL80211_CMD_FLUSH_PMKSA,
355 384
385 NL80211_CMD_REMAIN_ON_CHANNEL,
386 NL80211_CMD_CANCEL_REMAIN_ON_CHANNEL,
387
388 NL80211_CMD_SET_TX_BITRATE_MASK,
389
356 /* add new commands above here */ 390 /* add new commands above here */
357 391
358 /* used to define NL80211_CMD_MAX below */ 392 /* used to define NL80211_CMD_MAX below */
@@ -402,6 +436,8 @@ enum nl80211_commands {
402 * @NL80211_ATTR_WIPHY_RTS_THRESHOLD: RTS threshold (TX frames with length 436 * @NL80211_ATTR_WIPHY_RTS_THRESHOLD: RTS threshold (TX frames with length
403 * larger than or equal to this use RTS/CTS handshake); allowed range: 437 * larger than or equal to this use RTS/CTS handshake); allowed range:
404 * 0..65536, disable with (u32)-1; dot11RTSThreshold; u32 438 * 0..65536, disable with (u32)-1; dot11RTSThreshold; u32
439 * @NL80211_ATTR_WIPHY_COVERAGE_CLASS: Coverage Class as defined by IEEE 802.11
440 * section 7.3.2.9; dot11CoverageClass; u8
405 * 441 *
406 * @NL80211_ATTR_IFINDEX: network interface index of the device to operate on 442 * @NL80211_ATTR_IFINDEX: network interface index of the device to operate on
407 * @NL80211_ATTR_IFNAME: network interface name 443 * @NL80211_ATTR_IFNAME: network interface name
@@ -606,6 +642,17 @@ enum nl80211_commands {
606 * @NL80211_ATTR_MAX_NUM_PMKIDS: maximum number of PMKIDs a firmware can 642 * @NL80211_ATTR_MAX_NUM_PMKIDS: maximum number of PMKIDs a firmware can
607 * cache, a wiphy attribute. 643 * cache, a wiphy attribute.
608 * 644 *
645 * @NL80211_ATTR_DURATION: Duration of an operation in milliseconds, u32.
646 *
647 * @NL80211_ATTR_COOKIE: Generic 64-bit cookie to identify objects.
648 *
649 * @NL80211_ATTR_TX_RATES: Nested set of attributes
650 * (enum nl80211_tx_rate_attributes) describing TX rates per band. The
651 * enum nl80211_band value is used as the index (nla_type() of the nested
652 * data. If a band is not included, it will be configured to allow all
653 * rates based on negotiated supported rates information. This attribute
654 * is used with %NL80211_CMD_SET_TX_BITRATE_MASK.
655 *
609 * @NL80211_ATTR_MAX: highest attribute number currently defined 656 * @NL80211_ATTR_MAX: highest attribute number currently defined
610 * @__NL80211_ATTR_AFTER_LAST: internal use 657 * @__NL80211_ATTR_AFTER_LAST: internal use
611 */ 658 */
@@ -743,6 +790,14 @@ enum nl80211_attrs {
743 NL80211_ATTR_PMKID, 790 NL80211_ATTR_PMKID,
744 NL80211_ATTR_MAX_NUM_PMKIDS, 791 NL80211_ATTR_MAX_NUM_PMKIDS,
745 792
793 NL80211_ATTR_DURATION,
794
795 NL80211_ATTR_COOKIE,
796
797 NL80211_ATTR_WIPHY_COVERAGE_CLASS,
798
799 NL80211_ATTR_TX_RATES,
800
746 /* add attributes here, update the policy in nl80211.c */ 801 /* add attributes here, update the policy in nl80211.c */
747 802
748 __NL80211_ATTR_AFTER_LAST, 803 __NL80211_ATTR_AFTER_LAST,
@@ -1323,13 +1378,20 @@ enum nl80211_channel_type {
1323 * @NL80211_BSS_BEACON_INTERVAL: beacon interval of the (I)BSS (u16) 1378 * @NL80211_BSS_BEACON_INTERVAL: beacon interval of the (I)BSS (u16)
1324 * @NL80211_BSS_CAPABILITY: capability field (CPU order, u16) 1379 * @NL80211_BSS_CAPABILITY: capability field (CPU order, u16)
1325 * @NL80211_BSS_INFORMATION_ELEMENTS: binary attribute containing the 1380 * @NL80211_BSS_INFORMATION_ELEMENTS: binary attribute containing the
1326 * raw information elements from the probe response/beacon (bin) 1381 * raw information elements from the probe response/beacon (bin);
1382 * if the %NL80211_BSS_BEACON_IES attribute is present, the IEs here are
1383 * from a Probe Response frame; otherwise they are from a Beacon frame.
1384 * However, if the driver does not indicate the source of the IEs, these
1385 * IEs may be from either frame subtype.
1327 * @NL80211_BSS_SIGNAL_MBM: signal strength of probe response/beacon 1386 * @NL80211_BSS_SIGNAL_MBM: signal strength of probe response/beacon
1328 * in mBm (100 * dBm) (s32) 1387 * in mBm (100 * dBm) (s32)
1329 * @NL80211_BSS_SIGNAL_UNSPEC: signal strength of the probe response/beacon 1388 * @NL80211_BSS_SIGNAL_UNSPEC: signal strength of the probe response/beacon
1330 * in unspecified units, scaled to 0..100 (u8) 1389 * in unspecified units, scaled to 0..100 (u8)
1331 * @NL80211_BSS_STATUS: status, if this BSS is "used" 1390 * @NL80211_BSS_STATUS: status, if this BSS is "used"
1332 * @NL80211_BSS_SEEN_MS_AGO: age of this BSS entry in ms 1391 * @NL80211_BSS_SEEN_MS_AGO: age of this BSS entry in ms
1392 * @NL80211_BSS_BEACON_IES: binary attribute containing the raw information
1393 * elements from a Beacon frame (bin); not present if no Beacon frame has
1394 * yet been received
1333 * @__NL80211_BSS_AFTER_LAST: internal 1395 * @__NL80211_BSS_AFTER_LAST: internal
1334 * @NL80211_BSS_MAX: highest BSS attribute 1396 * @NL80211_BSS_MAX: highest BSS attribute
1335 */ 1397 */
@@ -1345,6 +1407,7 @@ enum nl80211_bss {
1345 NL80211_BSS_SIGNAL_UNSPEC, 1407 NL80211_BSS_SIGNAL_UNSPEC,
1346 NL80211_BSS_STATUS, 1408 NL80211_BSS_STATUS,
1347 NL80211_BSS_SEEN_MS_AGO, 1409 NL80211_BSS_SEEN_MS_AGO,
1410 NL80211_BSS_BEACON_IES,
1348 1411
1349 /* keep last */ 1412 /* keep last */
1350 __NL80211_BSS_AFTER_LAST, 1413 __NL80211_BSS_AFTER_LAST,
@@ -1442,4 +1505,33 @@ enum nl80211_key_attributes {
1442 NL80211_KEY_MAX = __NL80211_KEY_AFTER_LAST - 1 1505 NL80211_KEY_MAX = __NL80211_KEY_AFTER_LAST - 1
1443}; 1506};
1444 1507
1508/**
1509 * enum nl80211_tx_rate_attributes - TX rate set attributes
1510 * @__NL80211_TXRATE_INVALID: invalid
1511 * @NL80211_TXRATE_LEGACY: Legacy (non-MCS) rates allowed for TX rate selection
1512 * in an array of rates as defined in IEEE 802.11 7.3.2.2 (u8 values with
1513 * 1 = 500 kbps) but without the IE length restriction (at most
1514 * %NL80211_MAX_SUPP_RATES in a single array).
1515 * @__NL80211_TXRATE_AFTER_LAST: internal
1516 * @NL80211_TXRATE_MAX: highest TX rate attribute
1517 */
1518enum nl80211_tx_rate_attributes {
1519 __NL80211_TXRATE_INVALID,
1520 NL80211_TXRATE_LEGACY,
1521
1522 /* keep last */
1523 __NL80211_TXRATE_AFTER_LAST,
1524 NL80211_TXRATE_MAX = __NL80211_TXRATE_AFTER_LAST - 1
1525};
1526
1527/**
1528 * enum nl80211_band - Frequency band
 1529 * @NL80211_BAND_2GHZ: 2.4 GHz ISM band
 1530 * @NL80211_BAND_5GHZ: around 5 GHz band (4.9 - 5.7 GHz)
1531 */
1532enum nl80211_band {
1533 NL80211_BAND_2GHZ,
1534 NL80211_BAND_5GHZ,
1535};
1536
1445#endif /* __LINUX_NL80211_H */ 1537#endif /* __LINUX_NL80211_H */
diff --git a/include/linux/rtnetlink.h b/include/linux/rtnetlink.h
index 05330fc5b43..9590364fe8b 100644
--- a/include/linux/rtnetlink.h
+++ b/include/linux/rtnetlink.h
@@ -362,6 +362,8 @@ enum {
362#define RTAX_FEATURES RTAX_FEATURES 362#define RTAX_FEATURES RTAX_FEATURES
363 RTAX_RTO_MIN, 363 RTAX_RTO_MIN,
364#define RTAX_RTO_MIN RTAX_RTO_MIN 364#define RTAX_RTO_MIN RTAX_RTO_MIN
365 RTAX_INITRWND,
366#define RTAX_INITRWND RTAX_INITRWND
365 __RTAX_MAX 367 __RTAX_MAX
366}; 368};
367 369
diff --git a/include/linux/stmmac.h b/include/linux/stmmac.h
new file mode 100644
index 00000000000..32bfd1a8a48
--- /dev/null
+++ b/include/linux/stmmac.h
@@ -0,0 +1,53 @@
1/*******************************************************************************
2
3 Header file for stmmac platform data
4
5 Copyright (C) 2009 STMicroelectronics Ltd
6
7 This program is free software; you can redistribute it and/or modify it
8 under the terms and conditions of the GNU General Public License,
9 version 2, as published by the Free Software Foundation.
10
11 This program is distributed in the hope it will be useful, but WITHOUT
12 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14 more details.
15
16 You should have received a copy of the GNU General Public License along with
17 this program; if not, write to the Free Software Foundation, Inc.,
18 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
19
20 The full GNU General Public License is included in this distribution in
21 the file called "COPYING".
22
23 Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
24*******************************************************************************/
25
26#ifndef __STMMAC_PLATFORM_DATA
27#define __STMMAC_PLATFORM_DATA
28
 29/* platform data for the platform device structure's platform_data field */
30
31/* Private data for the STM on-board ethernet driver */
32struct plat_stmmacenet_data {
33 int bus_id;
34 int pbl;
35 int has_gmac;
36 void (*fix_mac_speed)(void *priv, unsigned int speed);
37 void (*bus_setup)(unsigned long ioaddr);
38#ifdef CONFIG_STM_DRIVERS
39 struct stm_pad_config *pad_config;
40#endif
41 void *bsp_priv;
42};
43
44struct plat_stmmacphy_data {
45 int bus_id;
46 int phy_addr;
47 unsigned int phy_mask;
48 int interface;
49 int (*phy_reset)(void *priv);
50 void *priv;
51};
52#endif
53
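
Reviewer note: the new stmmac.h only carries platform data, so the consumer is a board file. A hedged sketch of how a board might hand plat_stmmacenet_data to the driver; the device name "stmmaceth", the PBL value and the fix_mac_speed() hook body are assumptions, not taken from this patch:

#include <linux/platform_device.h>
#include <linux/stmmac.h>

static void board_fix_mac_speed(void *priv, unsigned int speed)
{
	/* adjust board-level clocking/retiming for the new link speed */
}

static struct plat_stmmacenet_data board_stmmac_data = {
	.bus_id		= 0,
	.pbl		= 32,
	.has_gmac	= 1,
	.fix_mac_speed	= board_fix_mac_speed,
};

static struct platform_device board_stmmac_device = {
	.name	= "stmmaceth",		/* assumed driver name */
	.id	= 0,
	.dev	= {
		.platform_data = &board_stmmac_data,
	},
};
/* platform_device_register(&board_stmmac_device) from board init code */
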
diff --git a/include/linux/sysctl.h b/include/linux/sysctl.h
index bd27fbc9db6..9f236cdcf3f 100644
--- a/include/linux/sysctl.h
+++ b/include/linux/sysctl.h
@@ -483,6 +483,7 @@ enum
483 NET_IPV4_CONF_ARP_NOTIFY=22, 483 NET_IPV4_CONF_ARP_NOTIFY=22,
484 NET_IPV4_CONF_ACCEPT_LOCAL=23, 484 NET_IPV4_CONF_ACCEPT_LOCAL=23,
485 NET_IPV4_CONF_SRC_VMARK=24, 485 NET_IPV4_CONF_SRC_VMARK=24,
486 NET_IPV4_CONF_PROXY_ARP_PVLAN=25,
486 __NET_IPV4_CONF_MAX 487 __NET_IPV4_CONF_MAX
487}; 488};
488 489
diff --git a/include/linux/vhost.h b/include/linux/vhost.h
new file mode 100644
index 00000000000..e847f1e3075
--- /dev/null
+++ b/include/linux/vhost.h
@@ -0,0 +1,130 @@
1#ifndef _LINUX_VHOST_H
2#define _LINUX_VHOST_H
3/* Userspace interface for in-kernel virtio accelerators. */
4
5/* vhost is used to reduce the number of system calls involved in virtio.
6 *
7 * Existing virtio net code is used in the guest without modification.
8 *
9 * This header includes interface used by userspace hypervisor for
10 * device configuration.
11 */
12
13#include <linux/types.h>
14#include <linux/compiler.h>
15#include <linux/ioctl.h>
16#include <linux/virtio_config.h>
17#include <linux/virtio_ring.h>
18
19struct vhost_vring_state {
20 unsigned int index;
21 unsigned int num;
22};
23
24struct vhost_vring_file {
25 unsigned int index;
26 int fd; /* Pass -1 to unbind from file. */
27
28};
29
30struct vhost_vring_addr {
31 unsigned int index;
32 /* Option flags. */
33 unsigned int flags;
34 /* Flag values: */
35 /* Whether log address is valid. If set enables logging. */
36#define VHOST_VRING_F_LOG 0
37
38 /* Start of array of descriptors (virtually contiguous) */
39 __u64 desc_user_addr;
40 /* Used structure address. Must be 32 bit aligned */
41 __u64 used_user_addr;
42 /* Available structure address. Must be 16 bit aligned */
43 __u64 avail_user_addr;
44 /* Logging support. */
45 /* Log writes to used structure, at offset calculated from specified
46 * address. Address must be 32 bit aligned. */
47 __u64 log_guest_addr;
48};
49
50struct vhost_memory_region {
51 __u64 guest_phys_addr;
52 __u64 memory_size; /* bytes */
53 __u64 userspace_addr;
54 __u64 flags_padding; /* No flags are currently specified. */
55};
56
57/* All region addresses and sizes must be 4K aligned. */
58#define VHOST_PAGE_SIZE 0x1000
59
60struct vhost_memory {
61 __u32 nregions;
62 __u32 padding;
63 struct vhost_memory_region regions[0];
64};
65
66/* ioctls */
67
68#define VHOST_VIRTIO 0xAF
69
70/* Features bitmask for forward compatibility. Transport bits are used for
71 * vhost specific features. */
72#define VHOST_GET_FEATURES _IOR(VHOST_VIRTIO, 0x00, __u64)
73#define VHOST_SET_FEATURES _IOW(VHOST_VIRTIO, 0x00, __u64)
74
75/* Set current process as the (exclusive) owner of this file descriptor. This
76 * must be called before any other vhost command. Further calls to
 77 * VHOST_SET_OWNER fail until VHOST_RESET_OWNER is called. */
78#define VHOST_SET_OWNER _IO(VHOST_VIRTIO, 0x01)
79/* Give up ownership, and reset the device to default values.
 80 * Allows a subsequent call to VHOST_SET_OWNER to succeed. */
81#define VHOST_RESET_OWNER _IO(VHOST_VIRTIO, 0x02)
82
83/* Set up/modify memory layout */
84#define VHOST_SET_MEM_TABLE _IOW(VHOST_VIRTIO, 0x03, struct vhost_memory)
85
86/* Write logging setup. */
87/* Memory writes can optionally be logged by setting bit at an offset
88 * (calculated from the physical address) from specified log base.
89 * The bit is set using an atomic 32 bit operation. */
90/* Set base address for logging. */
91#define VHOST_SET_LOG_BASE _IOW(VHOST_VIRTIO, 0x04, __u64)
92/* Specify an eventfd file descriptor to signal on log write. */
93#define VHOST_SET_LOG_FD _IOW(VHOST_VIRTIO, 0x07, int)
94
95/* Ring setup. */
96/* Set number of descriptors in ring. This parameter can not
97 * be modified while ring is running (bound to a device). */
98#define VHOST_SET_VRING_NUM _IOW(VHOST_VIRTIO, 0x10, struct vhost_vring_state)
99/* Set addresses for the ring. */
100#define VHOST_SET_VRING_ADDR _IOW(VHOST_VIRTIO, 0x11, struct vhost_vring_addr)
101/* Base value where queue looks for available descriptors */
102#define VHOST_SET_VRING_BASE _IOW(VHOST_VIRTIO, 0x12, struct vhost_vring_state)
103/* Get accessor: reads index, writes value in num */
104#define VHOST_GET_VRING_BASE _IOWR(VHOST_VIRTIO, 0x12, struct vhost_vring_state)
105
106/* The following ioctls use eventfd file descriptors to signal and poll
107 * for events. */
108
109/* Set eventfd to poll for added buffers */
110#define VHOST_SET_VRING_KICK _IOW(VHOST_VIRTIO, 0x20, struct vhost_vring_file)
 111/* Set eventfd to signal when buffers have been used */
112#define VHOST_SET_VRING_CALL _IOW(VHOST_VIRTIO, 0x21, struct vhost_vring_file)
113/* Set eventfd to signal an error */
114#define VHOST_SET_VRING_ERR _IOW(VHOST_VIRTIO, 0x22, struct vhost_vring_file)
115
116/* VHOST_NET specific defines */
117
118/* Attach virtio net ring to a raw socket, or tap device.
119 * The socket must be already bound to an ethernet device, this device will be
120 * used for transmit. Pass fd -1 to unbind from the socket and the transmit
121 * device. This can be used to stop the ring (e.g. for migration). */
122#define VHOST_NET_SET_BACKEND _IOW(VHOST_VIRTIO, 0x30, struct vhost_vring_file)
123
124/* Feature bits */
125/* Log all write descriptors. Can be changed while device is active. */
126#define VHOST_F_LOG_ALL 26
127/* vhost-net should add virtio_net_hdr for RX, and strip for TX packets. */
128#define VHOST_NET_F_VIRTIO_NET_HDR 27
129
130#endif
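
Reviewer note: a short userspace sketch of the ioctl flow defined by this new header. The ioctl names come from the header above; the /dev/vhost-net node, the ring size and the missing error handling are assumptions, and real code would mask the features and also set the memory table and ring addresses:

#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/vhost.h>

int vhost_bring_up(int backend_fd)
{
	__u64 features;
	struct vhost_vring_state num = { .index = 0, .num = 256 };
	struct vhost_vring_file backend = { .index = 0, .fd = backend_fd };
	int vhost = open("/dev/vhost-net", O_RDWR);	/* assumed device node */

	if (vhost < 0)
		return -1;
	ioctl(vhost, VHOST_SET_OWNER);			/* claim the fd */
	ioctl(vhost, VHOST_GET_FEATURES, &features);
	/* normally: features &= <bits this application supports> */
	ioctl(vhost, VHOST_SET_FEATURES, &features);
	ioctl(vhost, VHOST_SET_VRING_NUM, &num);
	/* VHOST_SET_MEM_TABLE and VHOST_SET_VRING_ADDR would follow here */
	ioctl(vhost, VHOST_NET_SET_BACKEND, &backend);	/* attach tap/raw socket */
	return vhost;
}
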
diff --git a/include/linux/virtio.h b/include/linux/virtio.h
index 057a2e01075..f508c651e53 100644
--- a/include/linux/virtio.h
+++ b/include/linux/virtio.h
@@ -51,6 +51,9 @@ struct virtqueue {
51 * This re-enables callbacks; it returns "false" if there are pending 51 * This re-enables callbacks; it returns "false" if there are pending
52 * buffers in the queue, to detect a possible race between the driver 52 * buffers in the queue, to detect a possible race between the driver
53 * checking for more work, and enabling callbacks. 53 * checking for more work, and enabling callbacks.
54 * @detach_unused_buf: detach first unused buffer
55 * vq: the struct virtqueue we're talking about.
56 * Returns NULL or the "data" token handed to add_buf
54 * 57 *
55 * Locking rules are straightforward: the driver is responsible for 58 * Locking rules are straightforward: the driver is responsible for
56 * locking. No two operations may be invoked simultaneously, with the exception 59 * locking. No two operations may be invoked simultaneously, with the exception
@@ -71,6 +74,7 @@ struct virtqueue_ops {
71 74
72 void (*disable_cb)(struct virtqueue *vq); 75 void (*disable_cb)(struct virtqueue *vq);
73 bool (*enable_cb)(struct virtqueue *vq); 76 bool (*enable_cb)(struct virtqueue *vq);
77 void *(*detach_unused_buf)(struct virtqueue *vq);
74}; 78};
75 79
76/** 80/**
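
Reviewer note: the new detach_unused_buf op is meant for teardown paths. A minimal sketch, assuming the vq_ops indirection of this kernel version and a hypothetical free_buf() helper for whatever the driver handed to add_buf:

#include <linux/virtio.h>

void free_buf(void *buf);	/* driver-specific, assumed */

static void my_drain_vq(struct virtqueue *vq)
{
	void *buf;

	/* reclaim buffers that were added but never consumed by the device */
	while ((buf = vq->vq_ops->detach_unused_buf(vq)) != NULL)
		free_buf(buf);
}
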
diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h
index 0884b9a0f77..2af52704e67 100644
--- a/include/net/cfg80211.h
+++ b/include/net/cfg80211.h
@@ -39,8 +39,8 @@
39 * @IEEE80211_BAND_5GHZ: around 5GHz band (4.9-5.7) 39 * @IEEE80211_BAND_5GHZ: around 5GHz band (4.9-5.7)
40 */ 40 */
41enum ieee80211_band { 41enum ieee80211_band {
42 IEEE80211_BAND_2GHZ, 42 IEEE80211_BAND_2GHZ = NL80211_BAND_2GHZ,
43 IEEE80211_BAND_5GHZ, 43 IEEE80211_BAND_5GHZ = NL80211_BAND_5GHZ,
44 44
45 /* keep last */ 45 /* keep last */
46 IEEE80211_NUM_BANDS 46 IEEE80211_NUM_BANDS
@@ -626,8 +626,14 @@ enum cfg80211_signal_type {
626 * @beacon_interval: the beacon interval as from the frame 626 * @beacon_interval: the beacon interval as from the frame
627 * @capability: the capability field in host byte order 627 * @capability: the capability field in host byte order
628 * @information_elements: the information elements (Note that there 628 * @information_elements: the information elements (Note that there
629 * is no guarantee that these are well-formed!) 629 * is no guarantee that these are well-formed!); this is a pointer to
 630 * either the beacon_ies or proberesp_ies depending on whether a Probe
631 * Response frame has been received
630 * @len_information_elements: total length of the information elements 632 * @len_information_elements: total length of the information elements
633 * @beacon_ies: the information elements from the last Beacon frame
634 * @len_beacon_ies: total length of the beacon_ies
635 * @proberesp_ies: the information elements from the last Probe Response frame
636 * @len_proberesp_ies: total length of the proberesp_ies
631 * @signal: signal strength value (type depends on the wiphy's signal_type) 637 * @signal: signal strength value (type depends on the wiphy's signal_type)
632 * @free_priv: function pointer to free private data 638 * @free_priv: function pointer to free private data
633 * @priv: private area for driver use, has at least wiphy->bss_priv_size bytes 639 * @priv: private area for driver use, has at least wiphy->bss_priv_size bytes
@@ -641,6 +647,10 @@ struct cfg80211_bss {
641 u16 capability; 647 u16 capability;
642 u8 *information_elements; 648 u8 *information_elements;
643 size_t len_information_elements; 649 size_t len_information_elements;
650 u8 *beacon_ies;
651 size_t len_beacon_ies;
652 u8 *proberesp_ies;
653 size_t len_proberesp_ies;
644 654
645 s32 signal; 655 s32 signal;
646 656
@@ -837,6 +847,7 @@ enum wiphy_params_flags {
837 WIPHY_PARAM_RETRY_LONG = 1 << 1, 847 WIPHY_PARAM_RETRY_LONG = 1 << 1,
838 WIPHY_PARAM_FRAG_THRESHOLD = 1 << 2, 848 WIPHY_PARAM_FRAG_THRESHOLD = 1 << 2,
839 WIPHY_PARAM_RTS_THRESHOLD = 1 << 3, 849 WIPHY_PARAM_RTS_THRESHOLD = 1 << 3,
850 WIPHY_PARAM_COVERAGE_CLASS = 1 << 4,
840}; 851};
841 852
842/** 853/**
@@ -856,20 +867,11 @@ enum tx_power_setting {
856 * cfg80211_bitrate_mask - masks for bitrate control 867 * cfg80211_bitrate_mask - masks for bitrate control
857 */ 868 */
858struct cfg80211_bitrate_mask { 869struct cfg80211_bitrate_mask {
859/*
860 * As discussed in Berlin, this struct really
861 * should look like this:
862
863 struct { 870 struct {
864 u32 legacy; 871 u32 legacy;
865 u8 mcs[IEEE80211_HT_MCS_MASK_LEN]; 872 /* TODO: add support for masking MCS rates; e.g.: */
873 /* u8 mcs[IEEE80211_HT_MCS_MASK_LEN]; */
866 } control[IEEE80211_NUM_BANDS]; 874 } control[IEEE80211_NUM_BANDS];
867
868 * Since we can always fix in-kernel users, let's keep
869 * it simpler for now:
870 */
871 u32 fixed; /* fixed bitrate, 0 == not fixed */
872 u32 maxrate; /* in kbps, 0 == no limit */
873}; 875};
874/** 876/**
875 * struct cfg80211_pmksa - PMK Security Association 877 * struct cfg80211_pmksa - PMK Security Association
@@ -988,6 +990,15 @@ struct cfg80211_pmksa {
988 * 990 *
989 * @dump_survey: get site survey information. 991 * @dump_survey: get site survey information.
990 * 992 *
993 * @remain_on_channel: Request the driver to remain awake on the specified
994 * channel for the specified duration to complete an off-channel
995 * operation (e.g., public action frame exchange). When the driver is
996 * ready on the requested channel, it must indicate this with an event
997 * notification by calling cfg80211_ready_on_channel().
998 * @cancel_remain_on_channel: Cancel an on-going remain-on-channel operation.
999 * This allows the operation to be terminated prior to timeout based on
1000 * the duration value.
1001 *
991 * @testmode_cmd: run a test mode command 1002 * @testmode_cmd: run a test mode command
992 * 1003 *
993 * @set_pmksa: Cache a PMKID for a BSSID. This is mostly useful for fullmac 1004 * @set_pmksa: Cache a PMKID for a BSSID. This is mostly useful for fullmac
@@ -1123,6 +1134,16 @@ struct cfg80211_ops {
1123 struct cfg80211_pmksa *pmksa); 1134 struct cfg80211_pmksa *pmksa);
1124 int (*flush_pmksa)(struct wiphy *wiphy, struct net_device *netdev); 1135 int (*flush_pmksa)(struct wiphy *wiphy, struct net_device *netdev);
1125 1136
1137 int (*remain_on_channel)(struct wiphy *wiphy,
1138 struct net_device *dev,
1139 struct ieee80211_channel *chan,
1140 enum nl80211_channel_type channel_type,
1141 unsigned int duration,
1142 u64 *cookie);
1143 int (*cancel_remain_on_channel)(struct wiphy *wiphy,
1144 struct net_device *dev,
1145 u64 cookie);
1146
1126 /* some temporary stuff to finish wext */ 1147 /* some temporary stuff to finish wext */
1127 int (*set_power_mgmt)(struct wiphy *wiphy, struct net_device *dev, 1148 int (*set_power_mgmt)(struct wiphy *wiphy, struct net_device *dev,
1128 bool enabled, int timeout); 1149 bool enabled, int timeout);
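
Reviewer note: a hedged sketch of a fullmac driver wiring up the two new ops, using exactly the prototypes added above. The my_*() helpers, the cookie handling and the synchronous "ready" notification are assumptions; a real driver would report readiness from its firmware event path:

#include <net/cfg80211.h>

u64 my_schedule_off_channel(struct net_device *dev,
			    struct ieee80211_channel *chan,
			    unsigned int duration);		/* assumed */
int my_cancel_off_channel(struct net_device *dev, u64 cookie);	/* assumed */

static int my_remain_on_channel(struct wiphy *wiphy, struct net_device *dev,
				struct ieee80211_channel *chan,
				enum nl80211_channel_type channel_type,
				unsigned int duration, u64 *cookie)
{
	*cookie = my_schedule_off_channel(dev, chan, duration);
	/* once the hardware is actually on the channel: */
	cfg80211_ready_on_channel(dev, *cookie, chan, channel_type,
				  duration, GFP_KERNEL);
	return 0;
}

static int my_cancel_remain_on_channel(struct wiphy *wiphy,
				       struct net_device *dev, u64 cookie)
{
	return my_cancel_off_channel(dev, cookie);
}
/* hooked up via .remain_on_channel / .cancel_remain_on_channel in cfg80211_ops */
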
@@ -1217,6 +1238,7 @@ struct wiphy {
1217 u8 retry_long; 1238 u8 retry_long;
1218 u32 frag_threshold; 1239 u32 frag_threshold;
1219 u32 rts_threshold; 1240 u32 rts_threshold;
1241 u8 coverage_class;
1220 1242
1221 char fw_version[ETHTOOL_BUSINFO_LEN]; 1243 char fw_version[ETHTOOL_BUSINFO_LEN];
1222 u32 hw_version; 1244 u32 hw_version;
@@ -1578,7 +1600,7 @@ unsigned int ieee80211_hdrlen(__le16 fc);
1578 * @addr: the device MAC address 1600 * @addr: the device MAC address
1579 * @iftype: the virtual interface type 1601 * @iftype: the virtual interface type
1580 */ 1602 */
1581int ieee80211_data_to_8023(struct sk_buff *skb, u8 *addr, 1603int ieee80211_data_to_8023(struct sk_buff *skb, const u8 *addr,
1582 enum nl80211_iftype iftype); 1604 enum nl80211_iftype iftype);
1583 1605
1584/** 1606/**
@@ -1589,10 +1611,28 @@ int ieee80211_data_to_8023(struct sk_buff *skb, u8 *addr,
1589 * @bssid: the network bssid (used only for iftype STATION and ADHOC) 1611 * @bssid: the network bssid (used only for iftype STATION and ADHOC)
1590 * @qos: build 802.11 QoS data frame 1612 * @qos: build 802.11 QoS data frame
1591 */ 1613 */
1592int ieee80211_data_from_8023(struct sk_buff *skb, u8 *addr, 1614int ieee80211_data_from_8023(struct sk_buff *skb, const u8 *addr,
1593 enum nl80211_iftype iftype, u8 *bssid, bool qos); 1615 enum nl80211_iftype iftype, u8 *bssid, bool qos);
1594 1616
1595/** 1617/**
1618 * ieee80211_amsdu_to_8023s - decode an IEEE 802.11n A-MSDU frame
1619 *
1620 * Decode an IEEE 802.11n A-MSDU frame and convert it to a list of
1621 * 802.3 frames. The @list will be empty if the decode fails. The
1622 * @skb is consumed after the function returns.
1623 *
1624 * @skb: The input IEEE 802.11n A-MSDU frame.
1625 * @list: The output list of 802.3 frames. It must be allocated and
 1626 * initialized by the caller.
1627 * @addr: The device MAC address.
1628 * @iftype: The device interface type.
1629 * @extra_headroom: The hardware extra headroom for SKBs in the @list.
1630 */
1631void ieee80211_amsdu_to_8023s(struct sk_buff *skb, struct sk_buff_head *list,
1632 const u8 *addr, enum nl80211_iftype iftype,
1633 const unsigned int extra_headroom);
1634
1635/**
1596 * cfg80211_classify8021d - determine the 802.1p/1d tag for a data frame 1636 * cfg80211_classify8021d - determine the 802.1p/1d tag for a data frame
1597 * @skb: the data frame 1637 * @skb: the data frame
1598 */ 1638 */
@@ -2129,5 +2169,45 @@ void cfg80211_roamed(struct net_device *dev, const u8 *bssid,
2129void cfg80211_disconnected(struct net_device *dev, u16 reason, 2169void cfg80211_disconnected(struct net_device *dev, u16 reason,
2130 u8 *ie, size_t ie_len, gfp_t gfp); 2170 u8 *ie, size_t ie_len, gfp_t gfp);
2131 2171
2172/**
2173 * cfg80211_ready_on_channel - notification of remain_on_channel start
2174 * @dev: network device
2175 * @cookie: the request cookie
2176 * @chan: The current channel (from remain_on_channel request)
2177 * @channel_type: Channel type
 2178 * @duration: Duration in milliseconds that the driver intends to remain on the
2179 * channel
2180 * @gfp: allocation flags
2181 */
2182void cfg80211_ready_on_channel(struct net_device *dev, u64 cookie,
2183 struct ieee80211_channel *chan,
2184 enum nl80211_channel_type channel_type,
2185 unsigned int duration, gfp_t gfp);
2186
2187/**
2188 * cfg80211_remain_on_channel_expired - remain_on_channel duration expired
2189 * @dev: network device
2190 * @cookie: the request cookie
2191 * @chan: The current channel (from remain_on_channel request)
2192 * @channel_type: Channel type
2193 * @gfp: allocation flags
2194 */
2195void cfg80211_remain_on_channel_expired(struct net_device *dev,
2196 u64 cookie,
2197 struct ieee80211_channel *chan,
2198 enum nl80211_channel_type channel_type,
2199 gfp_t gfp);
2200
2201
2202/**
2203 * cfg80211_new_sta - notify userspace about station
2204 *
2205 * @dev: the netdev
2206 * @mac_addr: the station's address
2207 * @sinfo: the station information
2208 * @gfp: allocation flags
2209 */
2210void cfg80211_new_sta(struct net_device *dev, const u8 *mac_addr,
2211 struct station_info *sinfo, gfp_t gfp);
2132 2212
2133#endif /* __NET_CFG80211_H */ 2213#endif /* __NET_CFG80211_H */
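
Reviewer note: a minimal sketch for the new cfg80211_new_sta() notification; leaving station_info zeroed is a simplification, a real driver would fill in whatever statistics it has:

#include <net/cfg80211.h>
#include <linux/string.h>

static void my_sta_assoc_event(struct net_device *dev, const u8 *mac)
{
	struct station_info sinfo;

	memset(&sinfo, 0, sizeof(sinfo));	/* no stats filled in this sketch */
	cfg80211_new_sta(dev, mac, &sinfo, GFP_KERNEL);
}
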
diff --git a/include/net/dst.h b/include/net/dst.h
index 39c4a5963e1..ce078cda6b7 100644
--- a/include/net/dst.h
+++ b/include/net/dst.h
@@ -83,8 +83,6 @@ struct dst_entry {
83 * (L1_CACHE_SIZE would be too much) 83 * (L1_CACHE_SIZE would be too much)
84 */ 84 */
85#ifdef CONFIG_64BIT 85#ifdef CONFIG_64BIT
86 long __pad_to_align_refcnt[2];
87#else
88 long __pad_to_align_refcnt[1]; 86 long __pad_to_align_refcnt[1];
89#endif 87#endif
90 /* 88 /*
diff --git a/include/net/icmp.h b/include/net/icmp.h
index dfa72d4e890..15b3dfe9fce 100644
--- a/include/net/icmp.h
+++ b/include/net/icmp.h
@@ -28,7 +28,7 @@ struct icmp_err {
28 unsigned fatal:1; 28 unsigned fatal:1;
29}; 29};
30 30
31extern struct icmp_err icmp_err_convert[]; 31extern const struct icmp_err icmp_err_convert[];
32#define ICMP_INC_STATS(net, field) SNMP_INC_STATS((net)->mib.icmp_statistics, field) 32#define ICMP_INC_STATS(net, field) SNMP_INC_STATS((net)->mib.icmp_statistics, field)
33#define ICMP_INC_STATS_BH(net, field) SNMP_INC_STATS_BH((net)->mib.icmp_statistics, field) 33#define ICMP_INC_STATS_BH(net, field) SNMP_INC_STATS_BH((net)->mib.icmp_statistics, field)
34#define ICMPMSGOUT_INC_STATS(net, field) SNMP_INC_STATS((net)->mib.icmpmsg_statistics, field+256) 34#define ICMPMSGOUT_INC_STATS(net, field) SNMP_INC_STATS((net)->mib.icmpmsg_statistics, field+256)
diff --git a/include/net/inet_sock.h b/include/net/inet_sock.h
index bd4c53f75ac..83fd34437cf 100644
--- a/include/net/inet_sock.h
+++ b/include/net/inet_sock.h
@@ -122,10 +122,12 @@ struct inet_sock {
122 __be32 inet_saddr; 122 __be32 inet_saddr;
123 __s16 uc_ttl; 123 __s16 uc_ttl;
124 __u16 cmsg_flags; 124 __u16 cmsg_flags;
125 struct ip_options *opt;
126 __be16 inet_sport; 125 __be16 inet_sport;
127 __u16 inet_id; 126 __u16 inet_id;
127
128 struct ip_options *opt;
128 __u8 tos; 129 __u8 tos;
130 __u8 min_ttl;
129 __u8 mc_ttl; 131 __u8 mc_ttl;
130 __u8 pmtudisc; 132 __u8 pmtudisc;
131 __u8 recverr:1, 133 __u8 recverr:1,
diff --git a/include/net/llc.h b/include/net/llc.h
index 7940da1606e..5503b74ab17 100644
--- a/include/net/llc.h
+++ b/include/net/llc.h
@@ -16,6 +16,9 @@
16#include <linux/if_ether.h> 16#include <linux/if_ether.h>
17#include <linux/list.h> 17#include <linux/list.h>
18#include <linux/spinlock.h> 18#include <linux/spinlock.h>
19#include <linux/rculist_nulls.h>
20#include <linux/hash.h>
21#include <linux/jhash.h>
19 22
20#include <asm/atomic.h> 23#include <asm/atomic.h>
21 24
@@ -31,6 +34,12 @@ struct llc_addr {
31#define LLC_SAP_STATE_INACTIVE 1 34#define LLC_SAP_STATE_INACTIVE 1
32#define LLC_SAP_STATE_ACTIVE 2 35#define LLC_SAP_STATE_ACTIVE 2
33 36
37#define LLC_SK_DEV_HASH_BITS 6
38#define LLC_SK_DEV_HASH_ENTRIES (1<<LLC_SK_DEV_HASH_BITS)
39
40#define LLC_SK_LADDR_HASH_BITS 6
41#define LLC_SK_LADDR_HASH_ENTRIES (1<<LLC_SK_LADDR_HASH_BITS)
42
34/** 43/**
35 * struct llc_sap - Defines the SAP component 44 * struct llc_sap - Defines the SAP component
36 * 45 *
@@ -53,18 +62,38 @@ struct llc_sap {
53 struct net_device *orig_dev); 62 struct net_device *orig_dev);
54 struct llc_addr laddr; 63 struct llc_addr laddr;
55 struct list_head node; 64 struct list_head node;
56 struct { 65 spinlock_t sk_lock;
57 rwlock_t lock; 66 int sk_count;
58 struct hlist_head list; 67 struct hlist_nulls_head sk_laddr_hash[LLC_SK_LADDR_HASH_ENTRIES];
59 } sk_list; 68 struct hlist_head sk_dev_hash[LLC_SK_DEV_HASH_ENTRIES];
60}; 69};
61 70
71static inline
72struct hlist_head *llc_sk_dev_hash(struct llc_sap *sap, int ifindex)
73{
74 return &sap->sk_dev_hash[ifindex % LLC_SK_DEV_HASH_ENTRIES];
75}
76
77static inline
78u32 llc_sk_laddr_hashfn(struct llc_sap *sap, const struct llc_addr *laddr)
79{
80 return hash_32(jhash(laddr->mac, sizeof(laddr->mac), 0),
81 LLC_SK_LADDR_HASH_BITS);
82}
83
84static inline
85struct hlist_nulls_head *llc_sk_laddr_hash(struct llc_sap *sap,
86 const struct llc_addr *laddr)
87{
88 return &sap->sk_laddr_hash[llc_sk_laddr_hashfn(sap, laddr)];
89}
90
62#define LLC_DEST_INVALID 0 /* Invalid LLC PDU type */ 91#define LLC_DEST_INVALID 0 /* Invalid LLC PDU type */
63#define LLC_DEST_SAP 1 /* Type 1 goes here */ 92#define LLC_DEST_SAP 1 /* Type 1 goes here */
64#define LLC_DEST_CONN 2 /* Type 2 goes here */ 93#define LLC_DEST_CONN 2 /* Type 2 goes here */
65 94
66extern struct list_head llc_sap_list; 95extern struct list_head llc_sap_list;
67extern rwlock_t llc_sap_list_lock; 96extern spinlock_t llc_sap_list_lock;
68 97
69extern int llc_rcv(struct sk_buff *skb, struct net_device *dev, 98extern int llc_rcv(struct sk_buff *skb, struct net_device *dev,
70 struct packet_type *pt, struct net_device *orig_dev); 99 struct packet_type *pt, struct net_device *orig_dev);
diff --git a/include/net/llc_conn.h b/include/net/llc_conn.h
index e2374e34989..2f97d8ddce9 100644
--- a/include/net/llc_conn.h
+++ b/include/net/llc_conn.h
@@ -76,6 +76,8 @@ struct llc_sock {
76 u32 rx_pdu_hdr; /* used for saving header of last pdu 76 u32 rx_pdu_hdr; /* used for saving header of last pdu
77 received and caused sending FRMR. 77 received and caused sending FRMR.
78 Used for resending FRMR */ 78 Used for resending FRMR */
79 u32 cmsg_flags;
80 struct hlist_node dev_hash_node;
79}; 81};
80 82
81static inline struct llc_sock *llc_sk(const struct sock *sk) 83static inline struct llc_sock *llc_sk(const struct sock *sk)
diff --git a/include/net/mac80211.h b/include/net/mac80211.h
index 0bf36975227..c90047de442 100644
--- a/include/net/mac80211.h
+++ b/include/net/mac80211.h
@@ -107,12 +107,14 @@ enum ieee80211_max_queues {
107 * 2^n-1 in the range 1..32767] 107 * 2^n-1 in the range 1..32767]
108 * @cw_max: maximum contention window [like @cw_min] 108 * @cw_max: maximum contention window [like @cw_min]
109 * @txop: maximum burst time in units of 32 usecs, 0 meaning disabled 109 * @txop: maximum burst time in units of 32 usecs, 0 meaning disabled
110 * @uapsd: is U-APSD mode enabled for the queue
110 */ 111 */
111struct ieee80211_tx_queue_params { 112struct ieee80211_tx_queue_params {
112 u16 txop; 113 u16 txop;
113 u16 cw_min; 114 u16 cw_min;
114 u16 cw_max; 115 u16 cw_max;
115 u8 aifs; 116 u8 aifs;
117 bool uapsd;
116}; 118};
117 119
118/** 120/**
@@ -255,9 +257,6 @@ struct ieee80211_bss_conf {
255 * @IEEE80211_TX_CTL_RATE_CTRL_PROBE: internal to mac80211, can be 257 * @IEEE80211_TX_CTL_RATE_CTRL_PROBE: internal to mac80211, can be
256 * set by rate control algorithms to indicate probe rate, will 258 * set by rate control algorithms to indicate probe rate, will
257 * be cleared for fragmented frames (except on the last fragment) 259 * be cleared for fragmented frames (except on the last fragment)
258 * @IEEE80211_TX_INTFL_RCALGO: mac80211 internal flag, do not test or
259 * set this flag in the driver; indicates that the rate control
260 * algorithm was used and should be notified of TX status
261 * @IEEE80211_TX_INTFL_NEED_TXPROCESSING: completely internal to mac80211, 260 * @IEEE80211_TX_INTFL_NEED_TXPROCESSING: completely internal to mac80211,
262 * used to indicate that a pending frame requires TX processing before 261 * used to indicate that a pending frame requires TX processing before
263 * it can be sent out. 262 * it can be sent out.
@@ -287,7 +286,6 @@ enum mac80211_tx_control_flags {
287 IEEE80211_TX_STAT_AMPDU = BIT(10), 286 IEEE80211_TX_STAT_AMPDU = BIT(10),
288 IEEE80211_TX_STAT_AMPDU_NO_BACK = BIT(11), 287 IEEE80211_TX_STAT_AMPDU_NO_BACK = BIT(11),
289 IEEE80211_TX_CTL_RATE_CTRL_PROBE = BIT(12), 288 IEEE80211_TX_CTL_RATE_CTRL_PROBE = BIT(12),
290 IEEE80211_TX_INTFL_RCALGO = BIT(13),
291 IEEE80211_TX_INTFL_NEED_TXPROCESSING = BIT(14), 289 IEEE80211_TX_INTFL_NEED_TXPROCESSING = BIT(14),
292 IEEE80211_TX_INTFL_RETRIED = BIT(15), 290 IEEE80211_TX_INTFL_RETRIED = BIT(15),
293 IEEE80211_TX_INTFL_DONT_ENCRYPT = BIT(16), 291 IEEE80211_TX_INTFL_DONT_ENCRYPT = BIT(16),
@@ -571,7 +569,13 @@ struct ieee80211_rx_status {
571 * @IEEE80211_CONF_MONITOR: there's a monitor interface present -- use this 569 * @IEEE80211_CONF_MONITOR: there's a monitor interface present -- use this
572 * to determine for example whether to calculate timestamps for packets 570 * to determine for example whether to calculate timestamps for packets
573 * or not, do not use instead of filter flags! 571 * or not, do not use instead of filter flags!
574 * @IEEE80211_CONF_PS: Enable 802.11 power save mode (managed mode only) 572 * @IEEE80211_CONF_PS: Enable 802.11 power save mode (managed mode only).
573 * This is the power save mode defined by IEEE 802.11-2007 section 11.2,
574 * meaning that the hardware still wakes up for beacons, is able to
575 * transmit frames and receive the possible acknowledgment frames.
 576 * Not to be confused with hardware-specific wakeup/sleep states,
 577 * the driver is responsible for those. See the section "Powersave support"
578 * for more.
575 * @IEEE80211_CONF_IDLE: The device is running, but idle; if the flag is set 579 * @IEEE80211_CONF_IDLE: The device is running, but idle; if the flag is set
576 * the driver should be prepared to handle configuration requests but 580 * the driver should be prepared to handle configuration requests but
577 * may turn the device off as much as possible. Typically, this flag will 581 * may turn the device off as much as possible. Typically, this flag will
@@ -595,8 +599,10 @@ enum ieee80211_conf_flags {
595 * @IEEE80211_CONF_CHANGE_CHANNEL: the channel/channel_type changed 599 * @IEEE80211_CONF_CHANGE_CHANNEL: the channel/channel_type changed
596 * @IEEE80211_CONF_CHANGE_RETRY_LIMITS: retry limits changed 600 * @IEEE80211_CONF_CHANGE_RETRY_LIMITS: retry limits changed
597 * @IEEE80211_CONF_CHANGE_IDLE: Idle flag changed 601 * @IEEE80211_CONF_CHANGE_IDLE: Idle flag changed
602 * @IEEE80211_CONF_CHANGE_SMPS: Spatial multiplexing powersave mode changed
598 */ 603 */
599enum ieee80211_conf_changed { 604enum ieee80211_conf_changed {
605 IEEE80211_CONF_CHANGE_SMPS = BIT(1),
600 IEEE80211_CONF_CHANGE_LISTEN_INTERVAL = BIT(2), 606 IEEE80211_CONF_CHANGE_LISTEN_INTERVAL = BIT(2),
601 IEEE80211_CONF_CHANGE_MONITOR = BIT(3), 607 IEEE80211_CONF_CHANGE_MONITOR = BIT(3),
602 IEEE80211_CONF_CHANGE_PS = BIT(4), 608 IEEE80211_CONF_CHANGE_PS = BIT(4),
@@ -607,6 +613,25 @@ enum ieee80211_conf_changed {
607}; 613};
608 614
609/** 615/**
616 * enum ieee80211_smps_mode - spatial multiplexing power save mode
617 *
618 * @IEEE80211_SMPS_AUTOMATIC: automatic
619 * @IEEE80211_SMPS_OFF: off
620 * @IEEE80211_SMPS_STATIC: static
621 * @IEEE80211_SMPS_DYNAMIC: dynamic
622 * @IEEE80211_SMPS_NUM_MODES: internal, don't use
623 */
624enum ieee80211_smps_mode {
625 IEEE80211_SMPS_AUTOMATIC,
626 IEEE80211_SMPS_OFF,
627 IEEE80211_SMPS_STATIC,
628 IEEE80211_SMPS_DYNAMIC,
629
630 /* keep last */
631 IEEE80211_SMPS_NUM_MODES,
632};
633
634/**
610 * struct ieee80211_conf - configuration of the device 635 * struct ieee80211_conf - configuration of the device
611 * 636 *
612 * This struct indicates how the driver shall configure the hardware. 637 * This struct indicates how the driver shall configure the hardware.
@@ -634,6 +659,10 @@ enum ieee80211_conf_changed {
634 * @short_frame_max_tx_count: Maximum number of transmissions for a "short" 659 * @short_frame_max_tx_count: Maximum number of transmissions for a "short"
635 * frame, called "dot11ShortRetryLimit" in 802.11, but actually means the 660 * frame, called "dot11ShortRetryLimit" in 802.11, but actually means the
636 * number of transmissions not the number of retries 661 * number of transmissions not the number of retries
662 *
663 * @smps_mode: spatial multiplexing powersave mode; note that
664 * %IEEE80211_SMPS_STATIC is used when the device is not
665 * configured for an HT channel
637 */ 666 */
638struct ieee80211_conf { 667struct ieee80211_conf {
639 u32 flags; 668 u32 flags;
@@ -646,6 +675,7 @@ struct ieee80211_conf {
646 675
647 struct ieee80211_channel *channel; 676 struct ieee80211_channel *channel;
648 enum nl80211_channel_type channel_type; 677 enum nl80211_channel_type channel_type;
678 enum ieee80211_smps_mode smps_mode;
649}; 679};
650 680
651/** 681/**
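
Reviewer note: a hedged sketch of a driver consuming the new smps_mode field from its config() callback; my_set_rx_chains() and MY_NUM_RX_CHAINS are assumptions about driver internals:

#include <net/mac80211.h>

#define MY_NUM_RX_CHAINS 2				/* assumed */
void my_set_rx_chains(void *priv, int chains);		/* assumed */

static int my_config(struct ieee80211_hw *hw, u32 changed)
{
	struct ieee80211_conf *conf = &hw->conf;

	if (changed & IEEE80211_CONF_CHANGE_SMPS) {
		switch (conf->smps_mode) {
		case IEEE80211_SMPS_STATIC:
		case IEEE80211_SMPS_DYNAMIC:
			my_set_rx_chains(hw->priv, 1);
			break;
		default:
			my_set_rx_chains(hw->priv, MY_NUM_RX_CHAINS);
			break;
		}
	}
	return 0;
}
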
@@ -657,12 +687,14 @@ struct ieee80211_conf {
657 * @type: type of this virtual interface 687 * @type: type of this virtual interface
658 * @bss_conf: BSS configuration for this interface, either our own 688 * @bss_conf: BSS configuration for this interface, either our own
659 * or the BSS we're associated to 689 * or the BSS we're associated to
690 * @addr: address of this interface
660 * @drv_priv: data area for driver use, will always be aligned to 691 * @drv_priv: data area for driver use, will always be aligned to
661 * sizeof(void *). 692 * sizeof(void *).
662 */ 693 */
663struct ieee80211_vif { 694struct ieee80211_vif {
664 enum nl80211_iftype type; 695 enum nl80211_iftype type;
665 struct ieee80211_bss_conf bss_conf; 696 struct ieee80211_bss_conf bss_conf;
697 u8 addr[ETH_ALEN];
666 /* must be last */ 698 /* must be last */
667 u8 drv_priv[0] __attribute__((__aligned__(sizeof(void *)))); 699 u8 drv_priv[0] __attribute__((__aligned__(sizeof(void *))));
668}; 700};
@@ -676,33 +708,6 @@ static inline bool ieee80211_vif_is_mesh(struct ieee80211_vif *vif)
676} 708}
677 709
678/** 710/**
679 * struct ieee80211_if_init_conf - initial configuration of an interface
680 *
681 * @vif: pointer to a driver-use per-interface structure. The pointer
682 * itself is also used for various functions including
683 * ieee80211_beacon_get() and ieee80211_get_buffered_bc().
684 * @type: one of &enum nl80211_iftype constants. Determines the type of
685 * added/removed interface.
686 * @mac_addr: pointer to MAC address of the interface. This pointer is valid
687 * until the interface is removed (i.e. it cannot be used after
688 * remove_interface() callback was called for this interface).
689 *
690 * This structure is used in add_interface() and remove_interface()
691 * callbacks of &struct ieee80211_hw.
692 *
693 * When you allow multiple interfaces to be added to your PHY, take care
694 * that the hardware can actually handle multiple MAC addresses. However,
695 * also take care that when there's no interface left with mac_addr != %NULL
696 * you remove the MAC address from the device to avoid acknowledging packets
697 * in pure monitor mode.
698 */
699struct ieee80211_if_init_conf {
700 enum nl80211_iftype type;
701 struct ieee80211_vif *vif;
702 void *mac_addr;
703};
704
705/**
706 * enum ieee80211_key_alg - key algorithm 711 * enum ieee80211_key_alg - key algorithm
707 * @ALG_WEP: WEP40 or WEP104 712 * @ALG_WEP: WEP40 or WEP104
708 * @ALG_TKIP: TKIP 713 * @ALG_TKIP: TKIP
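
Reviewer note: with ieee80211_if_init_conf removed above, the interface MAC address now comes from the new vif->addr field. A sketch assuming the updated add_interface() prototype that takes the vif directly; my_write_mac() is a hypothetical helper:

#include <net/mac80211.h>

void my_write_mac(void *priv, const u8 *addr);	/* assumed hw accessor */

static int my_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
{
	if (vif->type != NL80211_IFTYPE_STATION)
		return -EOPNOTSUPP;

	my_write_mac(hw->priv, vif->addr);	/* program the hw address filter */
	return 0;
}
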
@@ -926,6 +931,21 @@ enum ieee80211_tkip_key_type {
926 * @IEEE80211_HW_BEACON_FILTER: 931 * @IEEE80211_HW_BEACON_FILTER:
927 * Hardware supports dropping of irrelevant beacon frames to 932 * Hardware supports dropping of irrelevant beacon frames to
928 * avoid waking up cpu. 933 * avoid waking up cpu.
934 *
935 * @IEEE80211_HW_SUPPORTS_STATIC_SMPS:
936 * Hardware supports static spatial multiplexing powersave,
 937 * i.e. can turn off all but one chain even on HT connections
938 * that should be using more chains.
939 *
940 * @IEEE80211_HW_SUPPORTS_DYNAMIC_SMPS:
941 * Hardware supports dynamic spatial multiplexing powersave,
 942 * i.e. can turn off all but one chain and then wake the rest
943 * up as required after, for example, rts/cts handshake.
944 *
945 * @IEEE80211_HW_SUPPORTS_UAPSD:
946 * Hardware supports Unscheduled Automatic Power Save Delivery
947 * (U-APSD) in managed mode. The mode is configured with
948 * conf_tx() operation.
929 */ 949 */
930enum ieee80211_hw_flags { 950enum ieee80211_hw_flags {
931 IEEE80211_HW_HAS_RATE_CONTROL = 1<<0, 951 IEEE80211_HW_HAS_RATE_CONTROL = 1<<0,
@@ -943,6 +963,9 @@ enum ieee80211_hw_flags {
943 IEEE80211_HW_SUPPORTS_DYNAMIC_PS = 1<<12, 963 IEEE80211_HW_SUPPORTS_DYNAMIC_PS = 1<<12,
944 IEEE80211_HW_MFP_CAPABLE = 1<<13, 964 IEEE80211_HW_MFP_CAPABLE = 1<<13,
945 IEEE80211_HW_BEACON_FILTER = 1<<14, 965 IEEE80211_HW_BEACON_FILTER = 1<<14,
966 IEEE80211_HW_SUPPORTS_STATIC_SMPS = 1<<15,
967 IEEE80211_HW_SUPPORTS_DYNAMIC_SMPS = 1<<16,
968 IEEE80211_HW_SUPPORTS_UAPSD = 1<<17,
946}; 969};
947 970
948/** 971/**
@@ -1121,18 +1144,24 @@ ieee80211_get_alt_retry_rate(const struct ieee80211_hw *hw,
1121 * 1144 *
1122 * mac80211 has support for various powersave implementations. 1145 * mac80211 has support for various powersave implementations.
1123 * 1146 *
1124 * First, it can support hardware that handles all powersaving by 1147 * First, it can support hardware that handles all powersaving by itself,
1125 * itself, such hardware should simply set the %IEEE80211_HW_SUPPORTS_PS 1148 * such hardware should simply set the %IEEE80211_HW_SUPPORTS_PS hardware
1126 * hardware flag. In that case, it will be told about the desired 1149 * flag. In that case, it will be told about the desired powersave mode
1127 * powersave mode depending on the association status, and the driver 1150 * with the %IEEE80211_CONF_PS flag depending on the association status.
1128 * must take care of sending nullfunc frames when necessary, i.e. when 1151 * The hardware must take care of sending nullfunc frames when necessary,
1129 * entering and leaving powersave mode. The driver is required to look at 1152 * i.e. when entering and leaving powersave mode. The hardware is required
1130 * the AID in beacons and signal to the AP that it woke up when it finds 1153 * to look at the AID in beacons and signal to the AP that it woke up when
1131 * traffic directed to it. This mode supports dynamic PS by simply 1154 * it finds traffic directed to it.
1132 * enabling/disabling PS. 1155 *
1133 * 1156 * %IEEE80211_CONF_PS flag enabled means that the powersave mode defined in
1134 * Additionally, such hardware may set the %IEEE80211_HW_SUPPORTS_DYNAMIC_PS 1157 * IEEE 802.11-2007 section 11.2 is enabled. This is not to be confused
1135 * flag to indicate that it can support dynamic PS mode itself (see below). 1158 * with hardware wakeup and sleep states. Driver is responsible for waking
 1159 * up the hardware before issuing commands to the hardware and putting it
 1160 * back to sleep at appropriate times.
1161 *
1162 * When PS is enabled, hardware needs to wakeup for beacons and receive the
1163 * buffered multicast/broadcast frames after the beacon. Also it must be
1164 * possible to send frames and receive the acknowledment frame.
1136 * 1165 *
1137 * Other hardware designs cannot send nullfunc frames by themselves and also 1166 * Other hardware designs cannot send nullfunc frames by themselves and also
1138 * need software support for parsing the TIM bitmap. This is also supported 1167 * need software support for parsing the TIM bitmap. This is also supported
@@ -1140,14 +1169,35 @@ ieee80211_get_alt_retry_rate(const struct ieee80211_hw *hw,
1140 * %IEEE80211_HW_PS_NULLFUNC_STACK flags. The hardware is of course still 1169 * %IEEE80211_HW_PS_NULLFUNC_STACK flags. The hardware is of course still
1141 * required to pass up beacons. The hardware is still required to handle 1170 * required to pass up beacons. The hardware is still required to handle
1142 * waking up for multicast traffic; if it cannot the driver must handle that 1171 * waking up for multicast traffic; if it cannot the driver must handle that
1143 * as best as it can, mac80211 is too slow. 1172 * as best as it can, mac80211 is too slow to do that.
1144 * 1173 *
1145 * Dynamic powersave mode is an extension to normal powersave mode in which 1174 * Dynamic powersave is an extension to normal powersave in which the
1146 * the hardware stays awake for a user-specified period of time after sending 1175 * hardware stays awake for a user-specified period of time after sending a
1147 * a frame so that reply frames need not be buffered and therefore delayed 1176 * frame so that reply frames need not be buffered and therefore delayed to
 1148 * to the next wakeup. This can either be supported by hardware, in which case 1177 * the next wakeup. It's a compromise between getting good enough latency when
 1149 * the driver needs to look at the @dynamic_ps_timeout hardware configuration 1178 * there's data traffic and still saving significant power in idle
1150 * value, or by the stack if all nullfunc handling is in the stack. 1179 * periods.
1180 *
 1181 * Dynamic powersave is supported simply by mac80211 enabling and disabling
 1182 * PS based on traffic. The driver only needs to set the %IEEE80211_HW_SUPPORTS_PS
 1183 * flag and mac80211 will handle everything automatically. Additionally,
 1184 * hardware having support for the dynamic PS feature may set the
 1185 * %IEEE80211_HW_SUPPORTS_DYNAMIC_PS flag to indicate that it can support
 1186 * dynamic PS mode itself. The driver needs to look at the
 1187 * @dynamic_ps_timeout hardware configuration value and use that value
 1188 * whenever %IEEE80211_CONF_PS is set. In this case mac80211 will disable
 1189 * the dynamic PS feature in the stack and will just keep %IEEE80211_CONF_PS
 1190 * enabled whenever the user has enabled powersave.
1191 *
 1192 * The driver indicates U-APSD client support by enabling the
 1193 * %IEEE80211_HW_SUPPORTS_UAPSD flag. The mode is configured through the
 1194 * uapsd parameter in the conf_tx() operation. The hardware needs to send the
 1195 * QoS Nullfunc frames and stay awake until the service period has ended. To
 1196 * utilize U-APSD, dynamic powersave is disabled for the VoIP AC and all frames
 1197 * from that AC are transmitted with powersave enabled.
1198 *
1199 * Note: U-APSD client mode is not yet supported with
1200 * %IEEE80211_HW_PS_NULLFUNC_STACK.
1151 */ 1201 */
1152 1202
1153/** 1203/**
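
Reviewer note: the powersave modes discussed above are all advertised through hardware flags. A minimal sketch of a driver announcing them at probe time; which flags actually apply is of course hardware-specific:

#include <net/mac80211.h>

static void my_setup_ps(struct ieee80211_hw *hw)
{
	hw->flags |= IEEE80211_HW_SUPPORTS_PS |
		     IEEE80211_HW_SUPPORTS_DYNAMIC_PS |
		     IEEE80211_HW_SUPPORTS_UAPSD;
	/* SMPS support is announced the same way: */
	hw->flags |= IEEE80211_HW_SUPPORTS_STATIC_SMPS |
		     IEEE80211_HW_SUPPORTS_DYNAMIC_SMPS;
}
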
@@ -1211,6 +1261,31 @@ ieee80211_get_alt_retry_rate(const struct ieee80211_hw *hw,
1211 */ 1261 */
1212 1262
1213/** 1263/**
1264 * DOC: Spatial multiplexing power save
1265 *
1266 * SMPS (Spatial multiplexing power save) is a mechanism to conserve
1267 * power in an 802.11n implementation. For details on the mechanism
1268 * and rationale, please refer to 802.11 (as amended by 802.11n-2009)
1269 * "11.2.3 SM power save".
1270 *
1271 * The mac80211 implementation is capable of sending action frames
1272 * to update the AP about the station's SMPS mode, and will instruct
1273 * the driver to enter the specific mode. It will also announce the
1274 * requested SMPS mode during the association handshake. Hardware
1275 * support for this feature is required, and can be indicated by
1276 * hardware flags.
1277 *
1278 * The default mode will be "automatic", which nl80211/cfg80211
1279 * defines to be dynamic SMPS in (regular) powersave, and SMPS
1280 * turned off otherwise.
1281 *
1282 * To support this feature, the driver must set the appropriate
 1283 * hardware support flags, and handle the SMPS mode passed to the config()
 1284 * operation. Through this mechanism it will then be instructed to
 1285 * enter the requested SMPS mode while associated to an HT AP.
1286 */
1287
1288/**
1214 * DOC: Frame filtering 1289 * DOC: Frame filtering
1215 * 1290 *
1216 * mac80211 requires to see many management frames for proper 1291 * mac80211 requires to see many management frames for proper
@@ -1347,7 +1422,7 @@ enum ieee80211_ampdu_mlme_action {
1347 * When the device is started it should not have a MAC address 1422 * When the device is started it should not have a MAC address
1348 * to avoid acknowledging frames before a non-monitor device 1423 * to avoid acknowledging frames before a non-monitor device
1349 * is added. 1424 * is added.
1350 * Must be implemented. 1425 * Must be implemented and can sleep.
1351 * 1426 *
1352 * @stop: Called after last netdevice attached to the hardware 1427 * @stop: Called after last netdevice attached to the hardware
1353 * is disabled. This should turn off the hardware (at least 1428 * is disabled. This should turn off the hardware (at least
@@ -1355,7 +1430,7 @@ enum ieee80211_ampdu_mlme_action {
1355 * May be called right after add_interface if that rejects 1430 * May be called right after add_interface if that rejects
1356 * an interface. If you added any work onto the mac80211 workqueue 1431 * an interface. If you added any work onto the mac80211 workqueue
1357 * you should ensure to cancel it on this callback. 1432 * you should ensure to cancel it on this callback.
1358 * Must be implemented. 1433 * Must be implemented and can sleep.
1359 * 1434 *
1360 * @add_interface: Called when a netdevice attached to the hardware is 1435 * @add_interface: Called when a netdevice attached to the hardware is
1361 * enabled. Because it is not called for monitor mode devices, @start 1436 * enabled. Because it is not called for monitor mode devices, @start
@@ -1365,7 +1440,7 @@ enum ieee80211_ampdu_mlme_action {
1365 * interface is given in the conf parameter. 1440 * interface is given in the conf parameter.
1366 * The callback may refuse to add an interface by returning a 1441 * The callback may refuse to add an interface by returning a
1367 * negative error code (which will be seen in userspace.) 1442 * negative error code (which will be seen in userspace.)
1368 * Must be implemented. 1443 * Must be implemented and can sleep.
1369 * 1444 *
1370 * @remove_interface: Notifies a driver that an interface is going down. 1445 * @remove_interface: Notifies a driver that an interface is going down.
1371 * The @stop callback is called after this if it is the last interface 1446 * The @stop callback is called after this if it is the last interface
@@ -1374,19 +1449,20 @@ enum ieee80211_ampdu_mlme_action {
1374 * must be cleared so the device no longer acknowledges packets, 1449 * must be cleared so the device no longer acknowledges packets,
1375 * the mac_addr member of the conf structure is, however, set to the 1450 * the mac_addr member of the conf structure is, however, set to the
1376 * MAC address of the device going away. 1451 * MAC address of the device going away.
1377 * Hence, this callback must be implemented. 1452 * Hence, this callback must be implemented. It can sleep.
1378 * 1453 *
1379 * @config: Handler for configuration requests. IEEE 802.11 code calls this 1454 * @config: Handler for configuration requests. IEEE 802.11 code calls this
1380 * function to change hardware configuration, e.g., channel. 1455 * function to change hardware configuration, e.g., channel.
1381 * This function should never fail but returns a negative error code 1456 * This function should never fail but returns a negative error code
1382 * if it does. 1457 * if it does. The callback can sleep.
1383 * 1458 *
1384 * @bss_info_changed: Handler for configuration requests related to BSS 1459 * @bss_info_changed: Handler for configuration requests related to BSS
1385 * parameters that may vary during BSS's lifespan, and may affect low 1460 * parameters that may vary during BSS's lifespan, and may affect low
1386 * level driver (e.g. assoc/disassoc status, erp parameters). 1461 * level driver (e.g. assoc/disassoc status, erp parameters).
1387 * This function should not be used if no BSS has been set, unless 1462 * This function should not be used if no BSS has been set, unless
1388 * for association indication. The @changed parameter indicates which 1463 * for association indication. The @changed parameter indicates which
1389 * of the bss parameters has changed when a call is made. 1464 * of the bss parameters has changed when a call is made. The callback
1465 * can sleep.
1390 * 1466 *
1391 * @prepare_multicast: Prepare for multicast filter configuration. 1467 * @prepare_multicast: Prepare for multicast filter configuration.
1392 * This callback is optional, and its return value is passed 1468 * This callback is optional, and its return value is passed
@@ -1394,20 +1470,22 @@ enum ieee80211_ampdu_mlme_action {
1394 * 1470 *
1395 * @configure_filter: Configure the device's RX filter. 1471 * @configure_filter: Configure the device's RX filter.
1396 * See the section "Frame filtering" for more information. 1472 * See the section "Frame filtering" for more information.
1397 * This callback must be implemented. 1473 * This callback must be implemented and can sleep.
1398 * 1474 *
1399 * @set_tim: Set TIM bit. mac80211 calls this function when a TIM bit 1475 * @set_tim: Set TIM bit. mac80211 calls this function when a TIM bit
1400 * must be set or cleared for a given STA. Must be atomic. 1476 * must be set or cleared for a given STA. Must be atomic.
1401 * 1477 *
1402 * @set_key: See the section "Hardware crypto acceleration" 1478 * @set_key: See the section "Hardware crypto acceleration"
1403 * This callback can sleep, and is only called between add_interface 1479 * This callback is only called between add_interface and
1404 * and remove_interface calls, i.e. while the given virtual interface 1480 * remove_interface calls, i.e. while the given virtual interface
1405 * is enabled. 1481 * is enabled.
1406 * Returns a negative error code if the key can't be added. 1482 * Returns a negative error code if the key can't be added.
1483 * The callback can sleep.
1407 * 1484 *
1408 * @update_tkip_key: See the section "Hardware crypto acceleration" 1485 * @update_tkip_key: See the section "Hardware crypto acceleration"
1409 * This callback will be called in the context of Rx. Called for drivers 1486 * This callback will be called in the context of Rx. Called for drivers
1410 * which set IEEE80211_KEY_FLAG_TKIP_REQ_RX_P1_KEY. 1487 * which set IEEE80211_KEY_FLAG_TKIP_REQ_RX_P1_KEY.
1488 * The callback can sleep.
1411 * 1489 *
1412 * @hw_scan: Ask the hardware to service the scan request, no need to start 1490 * @hw_scan: Ask the hardware to service the scan request, no need to start
1413 * the scan state machine in stack. The scan must honour the channel 1491 * the scan state machine in stack. The scan must honour the channel
@@ -1421,21 +1499,28 @@ enum ieee80211_ampdu_mlme_action {
1421 * When the scan finishes, ieee80211_scan_completed() must be called; 1499 * When the scan finishes, ieee80211_scan_completed() must be called;
1422 * note that it also must be called when the scan cannot finish due to 1500 * note that it also must be called when the scan cannot finish due to
1423 * any error unless this callback returned a negative error code. 1501 * any error unless this callback returned a negative error code.
1502 * The callback can sleep.
1424 * 1503 *
1425 * @sw_scan_start: Notifier function that is called just before a software scan 1504 * @sw_scan_start: Notifier function that is called just before a software scan
1426 * is started. Can be NULL, if the driver doesn't need this notification. 1505 * is started. Can be NULL, if the driver doesn't need this notification.
1506 * The callback can sleep.
1427 * 1507 *
1428 * @sw_scan_complete: Notifier function that is called just after a software scan 1508 * @sw_scan_complete: Notifier function that is called just after a
1429 * finished. Can be NULL, if the driver doesn't need this notification. 1509 * software scan finished. Can be NULL, if the driver doesn't need
1510 * this notification.
1511 * The callback can sleep.
1430 * 1512 *
1431 * @get_stats: Return low-level statistics. 1513 * @get_stats: Return low-level statistics.
1432 * Returns zero if statistics are available. 1514 * Returns zero if statistics are available.
1515 * The callback can sleep.
1433 * 1516 *
1434 * @get_tkip_seq: If your device implements TKIP encryption in hardware this 1517 * @get_tkip_seq: If your device implements TKIP encryption in hardware this
1435 * callback should be provided to read the TKIP transmit IVs (both IV32 1518 * callback should be provided to read the TKIP transmit IVs (both IV32
1436 * and IV16) for the given key from hardware. 1519 * and IV16) for the given key from hardware.
1520 * The callback must be atomic.
1437 * 1521 *
1438 * @set_rts_threshold: Configuration of RTS threshold (if device needs it) 1522 * @set_rts_threshold: Configuration of RTS threshold (if device needs it)
1523 * The callback can sleep.
1439 * 1524 *
1440 * @sta_notify: Notifies low level driver about addition, removal or power 1525 * @sta_notify: Notifies low level driver about addition, removal or power
1441 * state transition of an associated station, AP, IBSS/WDS/mesh peer etc. 1526 * state transition of an associated station, AP, IBSS/WDS/mesh peer etc.
@@ -1444,30 +1529,36 @@ enum ieee80211_ampdu_mlme_action {
1444 * @conf_tx: Configure TX queue parameters (EDCF (aifs, cw_min, cw_max), 1529 * @conf_tx: Configure TX queue parameters (EDCF (aifs, cw_min, cw_max),
1445 * bursting) for a hardware TX queue. 1530 * bursting) for a hardware TX queue.
1446 * Returns a negative error code on failure. 1531 * Returns a negative error code on failure.
1532 * The callback can sleep.
1447 * 1533 *
1448 * @get_tx_stats: Get statistics of the current TX queue status. This is used 1534 * @get_tx_stats: Get statistics of the current TX queue status. This is used
1449 * to get number of currently queued packets (queue length), maximum queue 1535 * to get number of currently queued packets (queue length), maximum queue
1450 * size (limit), and total number of packets sent using each TX queue 1536 * size (limit), and total number of packets sent using each TX queue
1451 * (count). The 'stats' pointer points to an array that has hw->queues 1537 * (count). The 'stats' pointer points to an array that has hw->queues
1452 * items. 1538 * items.
1539 * The callback must be atomic.
1453 * 1540 *
1454 * @get_tsf: Get the current TSF timer value from firmware/hardware. Currently, 1541 * @get_tsf: Get the current TSF timer value from firmware/hardware. Currently,
1455 * this is only used for IBSS mode BSSID merging and debugging. Is not a 1542 * this is only used for IBSS mode BSSID merging and debugging. Is not a
1456 * required function. 1543 * required function.
1544 * The callback can sleep.
1457 * 1545 *
1458 * @set_tsf: Set the TSF timer to the specified value in the firmware/hardware. 1546 * @set_tsf: Set the TSF timer to the specified value in the firmware/hardware.
1459 * Currently, this is only used for IBSS mode debugging. Is not a 1547 * Currently, this is only used for IBSS mode debugging. Is not a
1460 * required function. 1548 * required function.
1549 * The callback can sleep.
1461 * 1550 *
1462 * @reset_tsf: Reset the TSF timer and allow firmware/hardware to synchronize 1551 * @reset_tsf: Reset the TSF timer and allow firmware/hardware to synchronize
1463 * with other STAs in the IBSS. This is only used in IBSS mode. This 1552 * with other STAs in the IBSS. This is only used in IBSS mode. This
1464 * function is optional if the firmware/hardware takes full care of 1553 * function is optional if the firmware/hardware takes full care of
1465 * TSF synchronization. 1554 * TSF synchronization.
1555 * The callback can sleep.
1466 * 1556 *
1467 * @tx_last_beacon: Determine whether the last IBSS beacon was sent by us. 1557 * @tx_last_beacon: Determine whether the last IBSS beacon was sent by us.
1468 * This is needed only for IBSS mode and the result of this function is 1558 * This is needed only for IBSS mode and the result of this function is
1469 * used to determine whether to reply to Probe Requests. 1559 * used to determine whether to reply to Probe Requests.
1470 * Returns non-zero if this device sent the last beacon. 1560 * Returns non-zero if this device sent the last beacon.
1561 * The callback can sleep.
1471 * 1562 *
1472 * @ampdu_action: Perform a certain A-MPDU action 1563 * @ampdu_action: Perform a certain A-MPDU action
1473 * The RA/TID combination determines the destination and TID we want 1564 * The RA/TID combination determines the destination and TID we want
@@ -1476,21 +1567,32 @@ enum ieee80211_ampdu_mlme_action {
1476 * is the first frame we expect to perform the action on. Notice 1567 * is the first frame we expect to perform the action on. Notice
1477 * that TX/RX_STOP can pass NULL for this parameter. 1568 * that TX/RX_STOP can pass NULL for this parameter.
1478 * Returns a negative error code on failure. 1569 * Returns a negative error code on failure.
1570 * The callback must be atomic.
1479 * 1571 *
1480 * @rfkill_poll: Poll rfkill hardware state. If you need this, you also 1572 * @rfkill_poll: Poll rfkill hardware state. If you need this, you also
1481 * need to set wiphy->rfkill_poll to %true before registration, 1573 * need to set wiphy->rfkill_poll to %true before registration,
1482 * and need to call wiphy_rfkill_set_hw_state() in the callback. 1574 * and need to call wiphy_rfkill_set_hw_state() in the callback.
1575 * The callback can sleep.
1576 *
1577 * @set_coverage_class: Set slot time for given coverage class as specified
1578 * in IEEE 802.11-2007 section 17.3.8.6 and modify ACK timeout
1579 * accordingly. This callback is not required and may sleep.
1483 * 1580 *
1484 * @testmode_cmd: Implement a cfg80211 test mode command. 1581 * @testmode_cmd: Implement a cfg80211 test mode command.
1582 * The callback can sleep.
1583 *
1584 * @flush: Flush all pending frames from the hardware queue, making sure
1585 * that the hardware queues are empty. If the parameter @drop is set
1586 * to %true, pending frames may be dropped. The callback can sleep.
1485 */ 1587 */
1486struct ieee80211_ops { 1588struct ieee80211_ops {
1487 int (*tx)(struct ieee80211_hw *hw, struct sk_buff *skb); 1589 int (*tx)(struct ieee80211_hw *hw, struct sk_buff *skb);
1488 int (*start)(struct ieee80211_hw *hw); 1590 int (*start)(struct ieee80211_hw *hw);
1489 void (*stop)(struct ieee80211_hw *hw); 1591 void (*stop)(struct ieee80211_hw *hw);
1490 int (*add_interface)(struct ieee80211_hw *hw, 1592 int (*add_interface)(struct ieee80211_hw *hw,
1491 struct ieee80211_if_init_conf *conf); 1593 struct ieee80211_vif *vif);
1492 void (*remove_interface)(struct ieee80211_hw *hw, 1594 void (*remove_interface)(struct ieee80211_hw *hw,
1493 struct ieee80211_if_init_conf *conf); 1595 struct ieee80211_vif *vif);
1494 int (*config)(struct ieee80211_hw *hw, u32 changed); 1596 int (*config)(struct ieee80211_hw *hw, u32 changed);
1495 void (*bss_info_changed)(struct ieee80211_hw *hw, 1597 void (*bss_info_changed)(struct ieee80211_hw *hw,
1496 struct ieee80211_vif *vif, 1598 struct ieee80211_vif *vif,
@@ -1535,9 +1637,11 @@ struct ieee80211_ops {
1535 struct ieee80211_sta *sta, u16 tid, u16 *ssn); 1637 struct ieee80211_sta *sta, u16 tid, u16 *ssn);
1536 1638
1537 void (*rfkill_poll)(struct ieee80211_hw *hw); 1639 void (*rfkill_poll)(struct ieee80211_hw *hw);
1640 void (*set_coverage_class)(struct ieee80211_hw *hw, u8 coverage_class);
1538#ifdef CONFIG_NL80211_TESTMODE 1641#ifdef CONFIG_NL80211_TESTMODE
1539 int (*testmode_cmd)(struct ieee80211_hw *hw, void *data, int len); 1642 int (*testmode_cmd)(struct ieee80211_hw *hw, void *data, int len);
1540#endif 1643#endif
1644 void (*flush)(struct ieee80211_hw *hw, bool drop);
1541}; 1645};
1542 1646
1543/** 1647/**
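The two new members at the bottom of struct ieee80211_ops above (set_coverage_class and flush) are plain function pointers a driver fills in next to its existing callbacks. Below is a minimal, hedged sketch of how a driver might hook them up; the foo_* names, the foo_priv layout and the foo_hw_* helpers are hypothetical, not part of mac80211, and the 3 us-per-class adjustment is the usual convention rather than anything mandated by this patch.

static void foo_set_coverage_class(struct ieee80211_hw *hw, u8 coverage_class)
{
	struct foo_priv *priv = hw->priv;

	/* IEEE 802.11-2007 17.3.8.6: grow slot time and ACK/CTS timeout
	 * by the extra air-propagation time of the coverage class. */
	foo_hw_set_slot_time(priv, 9 + 3 * coverage_class);
	foo_hw_set_ack_timeout(priv, priv->base_ack_timeout_us + 3 * coverage_class);
}

static void foo_flush(struct ieee80211_hw *hw, bool drop)
{
	struct foo_priv *priv = hw->priv;

	if (drop)
		foo_hw_drop_tx_queues(priv);	/* discard what is still queued */
	else
		foo_hw_wait_tx_empty(priv);	/* wait for the queues to drain */
}

static const struct ieee80211_ops foo_ops = {
	/* mandatory callbacks (tx, start, stop, add_interface, ...) omitted */
	.set_coverage_class	= foo_set_coverage_class,
	.flush			= foo_flush,
};

Both callbacks may sleep, which matches the requirements spelled out in the kernel-doc above.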
@@ -1777,7 +1881,7 @@ void ieee80211_tx_status_irqsafe(struct ieee80211_hw *hw,
1777/** 1881/**
1778 * ieee80211_beacon_get_tim - beacon generation function 1882 * ieee80211_beacon_get_tim - beacon generation function
1779 * @hw: pointer obtained from ieee80211_alloc_hw(). 1883 * @hw: pointer obtained from ieee80211_alloc_hw().
1780 * @vif: &struct ieee80211_vif pointer from &struct ieee80211_if_init_conf. 1884 * @vif: &struct ieee80211_vif pointer from the add_interface callback.
1781 * @tim_offset: pointer to variable that will receive the TIM IE offset. 1885 * @tim_offset: pointer to variable that will receive the TIM IE offset.
1782 * Set to 0 if invalid (in non-AP modes). 1886 * Set to 0 if invalid (in non-AP modes).
1783 * @tim_length: pointer to variable that will receive the TIM IE length, 1887 * @tim_length: pointer to variable that will receive the TIM IE length,
@@ -1805,7 +1909,7 @@ struct sk_buff *ieee80211_beacon_get_tim(struct ieee80211_hw *hw,
1805/** 1909/**
1806 * ieee80211_beacon_get - beacon generation function 1910 * ieee80211_beacon_get - beacon generation function
1807 * @hw: pointer obtained from ieee80211_alloc_hw(). 1911 * @hw: pointer obtained from ieee80211_alloc_hw().
1808 * @vif: &struct ieee80211_vif pointer from &struct ieee80211_if_init_conf. 1912 * @vif: &struct ieee80211_vif pointer from the add_interface callback.
1809 * 1913 *
1810 * See ieee80211_beacon_get_tim(). 1914 * See ieee80211_beacon_get_tim().
1811 */ 1915 */
@@ -1816,9 +1920,56 @@ static inline struct sk_buff *ieee80211_beacon_get(struct ieee80211_hw *hw,
1816} 1920}
1817 1921
1818/** 1922/**
1923 * ieee80211_pspoll_get - retrieve a PS Poll template
1924 * @hw: pointer obtained from ieee80211_alloc_hw().
1925 * @vif: &struct ieee80211_vif pointer from the add_interface callback.
1926 *
1927 * Creates a PS Poll template which can, for example, be uploaded to
1928 * hardware. The template must be updated after association so that the
1929 * correct AID, BSSID and MAC address are used.
1930 *
1931 * Note: Caller (or hardware) is responsible for setting the
1932 * &IEEE80211_FCTL_PM bit.
1933 */
1934struct sk_buff *ieee80211_pspoll_get(struct ieee80211_hw *hw,
1935 struct ieee80211_vif *vif);
1936
1937/**
1938 * ieee80211_nullfunc_get - retrieve a nullfunc template
1939 * @hw: pointer obtained from ieee80211_alloc_hw().
1940 * @vif: &struct ieee80211_vif pointer from the add_interface callback.
1941 *
1942 * Creates a Nullfunc template which can, for example, be uploaded to
1943 * hardware. The template must be updated after association so that the
1944 * correct BSSID and address are used.
1945 *
1946 * Note: Caller (or hardware) is responsible for setting the
1947 * &IEEE80211_FCTL_PM bit as well as Duration and Sequence Control fields.
1948 */
1949struct sk_buff *ieee80211_nullfunc_get(struct ieee80211_hw *hw,
1950 struct ieee80211_vif *vif);
1951
1952/**
1953 * ieee80211_probereq_get - retrieve a Probe Request template
1954 * @hw: pointer obtained from ieee80211_alloc_hw().
1955 * @vif: &struct ieee80211_vif pointer from the add_interface callback.
1956 * @ssid: SSID buffer
1957 * @ssid_len: length of SSID
1958 * @ie: buffer containing all IEs except SSID for the template
1959 * @ie_len: length of the IE buffer
1960 *
1961 * Creates a Probe Request template which can, for example, be uploaded to
1962 * hardware.
1963 */
1964struct sk_buff *ieee80211_probereq_get(struct ieee80211_hw *hw,
1965 struct ieee80211_vif *vif,
1966 const u8 *ssid, size_t ssid_len,
1967 const u8 *ie, size_t ie_len);
1968
1969/**
1819 * ieee80211_rts_get - RTS frame generation function 1970 * ieee80211_rts_get - RTS frame generation function
1820 * @hw: pointer obtained from ieee80211_alloc_hw(). 1971 * @hw: pointer obtained from ieee80211_alloc_hw().
1821 * @vif: &struct ieee80211_vif pointer from &struct ieee80211_if_init_conf. 1972 * @vif: &struct ieee80211_vif pointer from the add_interface callback.
1822 * @frame: pointer to the frame that is going to be protected by the RTS. 1973 * @frame: pointer to the frame that is going to be protected by the RTS.
1823 * @frame_len: the frame length (in octets). 1974 * @frame_len: the frame length (in octets).
1824 * @frame_txctl: &struct ieee80211_tx_info of the frame. 1975 * @frame_txctl: &struct ieee80211_tx_info of the frame.
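To put the three template helpers declared in this hunk (ieee80211_pspoll_get, ieee80211_nullfunc_get, ieee80211_probereq_get) in context, here is a hedged sketch of a driver uploading power-save templates once association has completed and the AID/BSSID fields are valid, e.g. from its bss_info_changed handler; foo_upload_template() and the FOO_TMPL_* identifiers are hypothetical firmware hooks, not mac80211 API.

static void foo_upload_ps_templates(struct ieee80211_hw *hw,
				    struct ieee80211_vif *vif)
{
	struct sk_buff *skb;

	skb = ieee80211_pspoll_get(hw, vif);
	if (skb) {
		/* caller/hardware still has to set IEEE80211_FCTL_PM */
		foo_upload_template(hw->priv, FOO_TMPL_PSPOLL, skb);
		dev_kfree_skb(skb);
	}

	skb = ieee80211_nullfunc_get(hw, vif);
	if (skb) {
		foo_upload_template(hw->priv, FOO_TMPL_NULLFUNC, skb);
		dev_kfree_skb(skb);
	}
}

As the kernel-doc notes, the templates must be refreshed after every (re)association, since the AID, BSSID and addresses they carry change.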
@@ -1837,7 +1988,7 @@ void ieee80211_rts_get(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
1837/** 1988/**
1838 * ieee80211_rts_duration - Get the duration field for an RTS frame 1989 * ieee80211_rts_duration - Get the duration field for an RTS frame
1839 * @hw: pointer obtained from ieee80211_alloc_hw(). 1990 * @hw: pointer obtained from ieee80211_alloc_hw().
1840 * @vif: &struct ieee80211_vif pointer from &struct ieee80211_if_init_conf. 1991 * @vif: &struct ieee80211_vif pointer from the add_interface callback.
1841 * @frame_len: the length of the frame that is going to be protected by the RTS. 1992 * @frame_len: the length of the frame that is going to be protected by the RTS.
1842 * @frame_txctl: &struct ieee80211_tx_info of the frame. 1993 * @frame_txctl: &struct ieee80211_tx_info of the frame.
1843 * 1994 *
@@ -1852,7 +2003,7 @@ __le16 ieee80211_rts_duration(struct ieee80211_hw *hw,
1852/** 2003/**
1853 * ieee80211_ctstoself_get - CTS-to-self frame generation function 2004 * ieee80211_ctstoself_get - CTS-to-self frame generation function
1854 * @hw: pointer obtained from ieee80211_alloc_hw(). 2005 * @hw: pointer obtained from ieee80211_alloc_hw().
1855 * @vif: &struct ieee80211_vif pointer from &struct ieee80211_if_init_conf. 2006 * @vif: &struct ieee80211_vif pointer from the add_interface callback.
1856 * @frame: pointer to the frame that is going to be protected by the CTS-to-self. 2007 * @frame: pointer to the frame that is going to be protected by the CTS-to-self.
1857 * @frame_len: the frame length (in octets). 2008 * @frame_len: the frame length (in octets).
1858 * @frame_txctl: &struct ieee80211_tx_info of the frame. 2009 * @frame_txctl: &struct ieee80211_tx_info of the frame.
@@ -1872,7 +2023,7 @@ void ieee80211_ctstoself_get(struct ieee80211_hw *hw,
1872/** 2023/**
1873 * ieee80211_ctstoself_duration - Get the duration field for a CTS-to-self frame 2024 * ieee80211_ctstoself_duration - Get the duration field for a CTS-to-self frame
1874 * @hw: pointer obtained from ieee80211_alloc_hw(). 2025 * @hw: pointer obtained from ieee80211_alloc_hw().
1875 * @vif: &struct ieee80211_vif pointer from &struct ieee80211_if_init_conf. 2026 * @vif: &struct ieee80211_vif pointer from the add_interface callback.
1876 * @frame_len: the length of the frame that is going to be protected by the CTS-to-self. 2027 * @frame_len: the length of the frame that is going to be protected by the CTS-to-self.
1877 * @frame_txctl: &struct ieee80211_tx_info of the frame. 2028 * @frame_txctl: &struct ieee80211_tx_info of the frame.
1878 * 2029 *
@@ -1888,7 +2039,7 @@ __le16 ieee80211_ctstoself_duration(struct ieee80211_hw *hw,
1888/** 2039/**
1889 * ieee80211_generic_frame_duration - Calculate the duration field for a frame 2040 * ieee80211_generic_frame_duration - Calculate the duration field for a frame
1890 * @hw: pointer obtained from ieee80211_alloc_hw(). 2041 * @hw: pointer obtained from ieee80211_alloc_hw().
1891 * @vif: &struct ieee80211_vif pointer from &struct ieee80211_if_init_conf. 2042 * @vif: &struct ieee80211_vif pointer from the add_interface callback.
1892 * @frame_len: the length of the frame. 2043 * @frame_len: the length of the frame.
1893 * @rate: the rate at which the frame is going to be transmitted. 2044 * @rate: the rate at which the frame is going to be transmitted.
1894 * 2045 *
@@ -1903,7 +2054,7 @@ __le16 ieee80211_generic_frame_duration(struct ieee80211_hw *hw,
1903/** 2054/**
1904 * ieee80211_get_buffered_bc - accessing buffered broadcast and multicast frames 2055 * ieee80211_get_buffered_bc - accessing buffered broadcast and multicast frames
1905 * @hw: pointer as obtained from ieee80211_alloc_hw(). 2056 * @hw: pointer as obtained from ieee80211_alloc_hw().
1906 * @vif: &struct ieee80211_vif pointer from &struct ieee80211_if_init_conf. 2057 * @vif: &struct ieee80211_vif pointer from the add_interface callback.
1907 * 2058 *
1908 * Function for accessing buffered broadcast and multicast frames. If 2059 * Function for accessing buffered broadcast and multicast frames. If
1909 * hardware/firmware does not implement buffering of broadcast/multicast 2060 * hardware/firmware does not implement buffering of broadcast/multicast
@@ -2071,7 +2222,7 @@ int ieee80211_start_tx_ba_session(struct ieee80211_sta *sta, u16 tid);
2071 2222
2072/** 2223/**
2073 * ieee80211_start_tx_ba_cb - low level driver ready to aggregate. 2224 * ieee80211_start_tx_ba_cb - low level driver ready to aggregate.
2074 * @vif: &struct ieee80211_vif pointer from &struct ieee80211_if_init_conf 2225 * @vif: &struct ieee80211_vif pointer from the add_interface callback
2075 * @ra: receiver address of the BA session recipient. 2226 * @ra: receiver address of the BA session recipient.
2076 * @tid: the TID to BA on. 2227 * @tid: the TID to BA on.
2077 * 2228 *
@@ -2082,7 +2233,7 @@ void ieee80211_start_tx_ba_cb(struct ieee80211_vif *vif, u8 *ra, u16 tid);
2082 2233
2083/** 2234/**
2084 * ieee80211_start_tx_ba_cb_irqsafe - low level driver ready to aggregate. 2235 * ieee80211_start_tx_ba_cb_irqsafe - low level driver ready to aggregate.
2085 * @vif: &struct ieee80211_vif pointer from &struct ieee80211_if_init_conf 2236 * @vif: &struct ieee80211_vif pointer from the add_interface callback
2086 * @ra: receiver address of the BA session recipient. 2237 * @ra: receiver address of the BA session recipient.
2087 * @tid: the TID to BA on. 2238 * @tid: the TID to BA on.
2088 * 2239 *
@@ -2110,7 +2261,7 @@ int ieee80211_stop_tx_ba_session(struct ieee80211_sta *sta, u16 tid,
2110 2261
2111/** 2262/**
2112 * ieee80211_stop_tx_ba_cb - low level driver ready to stop aggregate. 2263 * ieee80211_stop_tx_ba_cb - low level driver ready to stop aggregate.
2113 * @vif: &struct ieee80211_vif pointer from &struct ieee80211_if_init_conf 2264 * @vif: &struct ieee80211_vif pointer from the add_interface callback
2114 * @ra: receiver address of the BA session recipient. 2265 * @ra: receiver address of the BA session recipient.
2115 * @tid: the desired TID to BA on. 2266 * @tid: the desired TID to BA on.
2116 * 2267 *
@@ -2121,7 +2272,7 @@ void ieee80211_stop_tx_ba_cb(struct ieee80211_vif *vif, u8 *ra, u8 tid);
2121 2272
2122/** 2273/**
2123 * ieee80211_stop_tx_ba_cb_irqsafe - low level driver ready to stop aggregate. 2274 * ieee80211_stop_tx_ba_cb_irqsafe - low level driver ready to stop aggregate.
2124 * @vif: &struct ieee80211_vif pointer from &struct ieee80211_if_init_conf 2275 * @vif: &struct ieee80211_vif pointer from the add_interface callback
2125 * @ra: receiver address of the BA session recipient. 2276 * @ra: receiver address of the BA session recipient.
2126 * @tid: the desired TID to BA on. 2277 * @tid: the desired TID to BA on.
2127 * 2278 *
@@ -2200,7 +2351,7 @@ void ieee80211_sta_block_awake(struct ieee80211_hw *hw,
2200/** 2351/**
2201 * ieee80211_beacon_loss - inform hardware does not receive beacons 2352 * ieee80211_beacon_loss - inform hardware does not receive beacons
2202 * 2353 *
2203 * @vif: &struct ieee80211_vif pointer from &struct ieee80211_if_init_conf. 2354 * @vif: &struct ieee80211_vif pointer from the add_interface callback.
2204 * 2355 *
2205 * When beacon filtering is enabled with IEEE80211_HW_BEACON_FILTERING and 2356 * When beacon filtering is enabled with IEEE80211_HW_BEACON_FILTERING and
2206 * IEEE80211_CONF_PS is set, the driver needs to inform whenever the 2357 * IEEE80211_CONF_PS is set, the driver needs to inform whenever the
@@ -2234,8 +2385,12 @@ enum rate_control_changed {
2234 * @short_preamble: whether mac80211 will request short-preamble transmission 2385 * @short_preamble: whether mac80211 will request short-preamble transmission
2235 * if the selected rate supports it 2386 * if the selected rate supports it
2236 * @max_rate_idx: user-requested maximum rate (not MCS for now) 2387 * @max_rate_idx: user-requested maximum rate (not MCS for now)
2388 * (deprecated; this will be removed once drivers get updated to use
2389 * rate_idx_mask)
2390 * @rate_idx_mask: user-requested rate mask (not MCS for now)
2237 * @skb: the skb that will be transmitted, the control information in it needs 2391 * @skb: the skb that will be transmitted, the control information in it needs
2238 * to be filled in 2392 * to be filled in
2393 * @ap: whether this frame is sent out in AP mode
2239 */ 2394 */
2240struct ieee80211_tx_rate_control { 2395struct ieee80211_tx_rate_control {
2241 struct ieee80211_hw *hw; 2396 struct ieee80211_hw *hw;
@@ -2245,6 +2400,8 @@ struct ieee80211_tx_rate_control {
2245 struct ieee80211_tx_rate reported_rate; 2400 struct ieee80211_tx_rate reported_rate;
2246 bool rts, short_preamble; 2401 bool rts, short_preamble;
2247 u8 max_rate_idx; 2402 u8 max_rate_idx;
2403 u32 rate_idx_mask;
2404 bool ap;
2248}; 2405};
2249 2406
2250struct rate_control_ops { 2407struct rate_control_ops {
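The new rate_idx_mask field hands rate-control algorithms a user-supplied bitmap of allowed legacy rates, alongside the older (now deprecated) max_rate_idx cap. A hedged sketch of how an algorithm's get_rate() path might honour it follows; foo_apply_rate_mask() is illustrative only and assumes bit N of the mask corresponds to rate index N of the current band.

static int foo_apply_rate_mask(struct ieee80211_tx_rate_control *txrc, int idx)
{
	u32 mask = txrc->rate_idx_mask;

	/* fall back to the highest permitted rate at or below idx;
	 * a real algorithm would also cope with an all-clear mask */
	while (idx > 0 && !(mask & BIT(idx)))
		idx--;
	return idx;
}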
diff --git a/include/net/phonet/pep.h b/include/net/phonet/pep.h
index 4c61cdce4e5..35672b1cf44 100644
--- a/include/net/phonet/pep.h
+++ b/include/net/phonet/pep.h
@@ -44,6 +44,7 @@ struct pep_sock {
44 u8 rx_fc; /* RX flow control */ 44 u8 rx_fc; /* RX flow control */
45 u8 tx_fc; /* TX flow control */ 45 u8 tx_fc; /* TX flow control */
46 u8 init_enable; /* auto-enable at creation */ 46 u8 init_enable; /* auto-enable at creation */
47 u8 aligned;
47}; 48};
48 49
49static inline struct pep_sock *pep_sk(struct sock *sk) 50static inline struct pep_sock *pep_sk(struct sock *sk)
@@ -77,6 +78,7 @@ static inline struct pnpipehdr *pnp_hdr(struct sk_buff *skb)
77 78
78enum { 79enum {
79 PNS_PIPE_DATA = 0x20, 80 PNS_PIPE_DATA = 0x20,
81 PNS_PIPE_ALIGNED_DATA,
80 82
81 PNS_PEP_CONNECT_REQ = 0x40, 83 PNS_PEP_CONNECT_REQ = 0x40,
82 PNS_PEP_CONNECT_RESP, 84 PNS_PEP_CONNECT_RESP,
@@ -138,6 +140,7 @@ enum {
138 PN_PIPE_SB_NEGOTIATED_FC, 140 PN_PIPE_SB_NEGOTIATED_FC,
139 PN_PIPE_SB_REQUIRED_FC_TX, 141 PN_PIPE_SB_REQUIRED_FC_TX,
140 PN_PIPE_SB_PREFERRED_FC_RX, 142 PN_PIPE_SB_PREFERRED_FC_RX,
143 PN_PIPE_SB_ALIGNED_DATA,
141}; 144};
142 145
143/* Phonet pipe flow control models */ 146/* Phonet pipe flow control models */
diff --git a/include/net/pkt_sched.h b/include/net/pkt_sched.h
index 2d567265363..b6cdc33b39c 100644
--- a/include/net/pkt_sched.h
+++ b/include/net/pkt_sched.h
@@ -71,6 +71,7 @@ extern void qdisc_watchdog_cancel(struct qdisc_watchdog *wd);
71 71
72extern struct Qdisc_ops pfifo_qdisc_ops; 72extern struct Qdisc_ops pfifo_qdisc_ops;
73extern struct Qdisc_ops bfifo_qdisc_ops; 73extern struct Qdisc_ops bfifo_qdisc_ops;
74extern struct Qdisc_ops pfifo_head_drop_qdisc_ops;
74 75
75extern int fifo_set_limit(struct Qdisc *q, unsigned int limit); 76extern int fifo_set_limit(struct Qdisc *q, unsigned int limit);
76extern struct Qdisc *fifo_create_dflt(struct Qdisc *sch, struct Qdisc_ops *ops, 77extern struct Qdisc *fifo_create_dflt(struct Qdisc *sch, struct Qdisc_ops *ops,
diff --git a/include/net/request_sock.h b/include/net/request_sock.h
index c9b50ebd9ce..99e6e19b57c 100644
--- a/include/net/request_sock.h
+++ b/include/net/request_sock.h
@@ -45,6 +45,8 @@ struct request_sock_ops {
45 void (*send_reset)(struct sock *sk, 45 void (*send_reset)(struct sock *sk,
46 struct sk_buff *skb); 46 struct sk_buff *skb);
47 void (*destructor)(struct request_sock *req); 47 void (*destructor)(struct request_sock *req);
48 void (*syn_ack_timeout)(struct sock *sk,
49 struct request_sock *req);
48}; 50};
49 51
50/* struct request_sock - mini sock to represent a connection request 52/* struct request_sock - mini sock to represent a connection request
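The new syn_ack_timeout operation lets a transport be notified each time the SYN-ACK retransmit timer fires for a still-embryonic connection (the matching tcp_syn_ack_timeout() prototype appears in the tcp.h hunk further down). A hedged sketch of how a protocol might wire it up; the foo_* members stand in for the protocol's existing handlers and are not taken from this diff.

static struct request_sock_ops foo_request_sock_ops __read_mostly = {
	.family		 = PF_INET,
	.send_reset	 = foo_send_reset,
	.destructor	 = foo_reqsk_destructor,
	/* called from the SYN-ACK retransmission timer */
	.syn_ack_timeout = tcp_syn_ack_timeout,
};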
diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
index dad558bc06f..67dc08eaaa4 100644
--- a/include/net/sch_generic.h
+++ b/include/net/sch_generic.h
@@ -427,6 +427,25 @@ static inline struct sk_buff *qdisc_dequeue_head(struct Qdisc *sch)
427 return __qdisc_dequeue_head(sch, &sch->q); 427 return __qdisc_dequeue_head(sch, &sch->q);
428} 428}
429 429
430static inline unsigned int __qdisc_queue_drop_head(struct Qdisc *sch,
431 struct sk_buff_head *list)
432{
433 struct sk_buff *skb = __qdisc_dequeue_head(sch, list);
434
435 if (likely(skb != NULL)) {
436 unsigned int len = qdisc_pkt_len(skb);
437 kfree_skb(skb);
438 return len;
439 }
440
441 return 0;
442}
443
444static inline unsigned int qdisc_queue_drop_head(struct Qdisc *sch)
445{
446 return __qdisc_queue_drop_head(sch, &sch->q);
447}
448
430static inline struct sk_buff *__qdisc_dequeue_tail(struct Qdisc *sch, 449static inline struct sk_buff *__qdisc_dequeue_tail(struct Qdisc *sch,
431 struct sk_buff_head *list) 450 struct sk_buff_head *list)
432{ 451{
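The new __qdisc_queue_drop_head()/qdisc_queue_drop_head() helpers drop from the front of a qdisc's queue and return the freed packet's length, which is exactly what a head-dropping FIFO (see the pfifo_head_drop_qdisc_ops added to pkt_sched.h above) needs. Below is a hedged sketch of an enqueue path built on the helper; the fifo_sched_data layout is an assumption of this sketch, not something defined in this diff.

struct fifo_sched_data {
	u32 limit;			/* assumed qdisc-private packet limit */
};

static int head_drop_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
	struct fifo_sched_data *q = qdisc_priv(sch);

	/* make room by discarding the oldest packet(s), not the new one */
	while (skb_queue_len(&sch->q) >= q->limit) {
		if (!qdisc_queue_drop_head(sch))
			break;
		sch->qstats.drops++;
	}
	return qdisc_enqueue_tail(skb, sch);
}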
diff --git a/include/net/snmp.h b/include/net/snmp.h
index f0d756f2ac9..da02ee027d6 100644
--- a/include/net/snmp.h
+++ b/include/net/snmp.h
@@ -32,7 +32,7 @@
32 * - name of entries. 32 * - name of entries.
33 */ 33 */
34struct snmp_mib { 34struct snmp_mib {
35 char *name; 35 const char *name;
36 int entry; 36 int entry;
37}; 37};
38 38
diff --git a/include/net/tcp.h b/include/net/tcp.h
index 34f5cc24d90..87d164b9bd8 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -400,6 +400,8 @@ extern int compat_tcp_setsockopt(struct sock *sk,
400 int level, int optname, 400 int level, int optname,
401 char __user *optval, unsigned int optlen); 401 char __user *optval, unsigned int optlen);
402extern void tcp_set_keepalive(struct sock *sk, int val); 402extern void tcp_set_keepalive(struct sock *sk, int val);
403extern void tcp_syn_ack_timeout(struct sock *sk,
404 struct request_sock *req);
403extern int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, 405extern int tcp_recvmsg(struct kiocb *iocb, struct sock *sk,
404 struct msghdr *msg, 406 struct msghdr *msg,
405 size_t len, int nonblock, 407 size_t len, int nonblock,
@@ -856,13 +858,6 @@ static inline void tcp_check_probe_timer(struct sock *sk)
856 icsk->icsk_rto, TCP_RTO_MAX); 858 icsk->icsk_rto, TCP_RTO_MAX);
857} 859}
858 860
859static inline void tcp_push_pending_frames(struct sock *sk)
860{
861 struct tcp_sock *tp = tcp_sk(sk);
862
863 __tcp_push_pending_frames(sk, tcp_current_mss(sk), tp->nonagle);
864}
865
866static inline void tcp_init_wl(struct tcp_sock *tp, u32 seq) 861static inline void tcp_init_wl(struct tcp_sock *tp, u32 seq)
867{ 862{
868 tp->snd_wl1 = seq; 863 tp->snd_wl1 = seq;
@@ -972,7 +967,8 @@ static inline void tcp_sack_reset(struct tcp_options_received *rx_opt)
972/* Determine a window scaling and initial window to offer. */ 967/* Determine a window scaling and initial window to offer. */
973extern void tcp_select_initial_window(int __space, __u32 mss, 968extern void tcp_select_initial_window(int __space, __u32 mss,
974 __u32 *rcv_wnd, __u32 *window_clamp, 969 __u32 *rcv_wnd, __u32 *window_clamp,
975 int wscale_ok, __u8 *rcv_wscale); 970 int wscale_ok, __u8 *rcv_wscale,
971 __u32 init_rcv_wnd);
976 972
977static inline int tcp_win_from_space(int space) 973static inline int tcp_win_from_space(int space)
978{ 974{
@@ -1342,6 +1338,15 @@ static inline int tcp_write_queue_empty(struct sock *sk)
1342 return skb_queue_empty(&sk->sk_write_queue); 1338 return skb_queue_empty(&sk->sk_write_queue);
1343} 1339}
1344 1340
1341static inline void tcp_push_pending_frames(struct sock *sk)
1342{
1343 if (tcp_send_head(sk)) {
1344 struct tcp_sock *tp = tcp_sk(sk);
1345
1346 __tcp_push_pending_frames(sk, tcp_current_mss(sk), tp->nonagle);
1347 }
1348}
1349
1345/* Start sequence of the highest skb with SACKed bit, valid only if 1350/* Start sequence of the highest skb with SACKed bit, valid only if
1346 * sacked > 0 or when the caller has ensured validity by itself. 1351 * sacked > 0 or when the caller has ensured validity by itself.
1347 */ 1352 */
diff --git a/include/net/xfrm.h b/include/net/xfrm.h
index 60c27706e7b..fcee547ca7e 100644
--- a/include/net/xfrm.h
+++ b/include/net/xfrm.h
@@ -1408,9 +1408,9 @@ extern int xfrm6_input_addr(struct sk_buff *skb, xfrm_address_t *daddr,
1408 xfrm_address_t *saddr, u8 proto); 1408 xfrm_address_t *saddr, u8 proto);
1409extern int xfrm6_tunnel_register(struct xfrm6_tunnel *handler, unsigned short family); 1409extern int xfrm6_tunnel_register(struct xfrm6_tunnel *handler, unsigned short family);
1410extern int xfrm6_tunnel_deregister(struct xfrm6_tunnel *handler, unsigned short family); 1410extern int xfrm6_tunnel_deregister(struct xfrm6_tunnel *handler, unsigned short family);
1411extern __be32 xfrm6_tunnel_alloc_spi(xfrm_address_t *saddr); 1411extern __be32 xfrm6_tunnel_alloc_spi(struct net *net, xfrm_address_t *saddr);
1412extern void xfrm6_tunnel_free_spi(xfrm_address_t *saddr); 1412extern void xfrm6_tunnel_free_spi(struct net *net, xfrm_address_t *saddr);
1413extern __be32 xfrm6_tunnel_spi_lookup(xfrm_address_t *saddr); 1413extern __be32 xfrm6_tunnel_spi_lookup(struct net *net, xfrm_address_t *saddr);
1414extern int xfrm6_extract_output(struct xfrm_state *x, struct sk_buff *skb); 1414extern int xfrm6_extract_output(struct xfrm_state *x, struct sk_buff *skb);
1415extern int xfrm6_prepare_output(struct xfrm_state *x, struct sk_buff *skb); 1415extern int xfrm6_prepare_output(struct xfrm_state *x, struct sk_buff *skb);
1416extern int xfrm6_output(struct sk_buff *skb); 1416extern int xfrm6_output(struct sk_buff *skb);
diff --git a/lib/vsprintf.c b/lib/vsprintf.c
index 3b8aeec4e32..af4aaa6c36f 100644
--- a/lib/vsprintf.c
+++ b/lib/vsprintf.c
@@ -681,24 +681,55 @@ static char *mac_address_string(char *buf, char *end, u8 *addr,
681 char mac_addr[sizeof("xx:xx:xx:xx:xx:xx")]; 681 char mac_addr[sizeof("xx:xx:xx:xx:xx:xx")];
682 char *p = mac_addr; 682 char *p = mac_addr;
683 int i; 683 int i;
684 char separator;
685
686 if (fmt[1] == 'F') { /* FDDI canonical format */
687 separator = '-';
688 } else {
689 separator = ':';
690 }
684 691
685 for (i = 0; i < 6; i++) { 692 for (i = 0; i < 6; i++) {
686 p = pack_hex_byte(p, addr[i]); 693 p = pack_hex_byte(p, addr[i]);
687 if (fmt[0] == 'M' && i != 5) 694 if (fmt[0] == 'M' && i != 5)
688 *p++ = ':'; 695 *p++ = separator;
689 } 696 }
690 *p = '\0'; 697 *p = '\0';
691 698
692 return string(buf, end, mac_addr, spec); 699 return string(buf, end, mac_addr, spec);
693} 700}
694 701
695static char *ip4_string(char *p, const u8 *addr, bool leading_zeros) 702static char *ip4_string(char *p, const u8 *addr, const char *fmt)
696{ 703{
697 int i; 704 int i;
698 705 bool leading_zeros = (fmt[0] == 'i');
706 int index;
707 int step;
708
709 switch (fmt[2]) {
710 case 'h':
711#ifdef __BIG_ENDIAN
712 index = 0;
713 step = 1;
714#else
715 index = 3;
716 step = -1;
717#endif
718 break;
719 case 'l':
720 index = 3;
721 step = -1;
722 break;
723 case 'n':
724 case 'b':
725 default:
726 index = 0;
727 step = 1;
728 break;
729 }
699 for (i = 0; i < 4; i++) { 730 for (i = 0; i < 4; i++) {
700 char temp[3]; /* hold each IP quad in reverse order */ 731 char temp[3]; /* hold each IP quad in reverse order */
701 int digits = put_dec_trunc(temp, addr[i]) - temp; 732 int digits = put_dec_trunc(temp, addr[index]) - temp;
702 if (leading_zeros) { 733 if (leading_zeros) {
703 if (digits < 3) 734 if (digits < 3)
704 *p++ = '0'; 735 *p++ = '0';
@@ -710,6 +741,7 @@ static char *ip4_string(char *p, const u8 *addr, bool leading_zeros)
710 *p++ = temp[digits]; 741 *p++ = temp[digits];
711 if (i < 3) 742 if (i < 3)
712 *p++ = '.'; 743 *p++ = '.';
744 index += step;
713 } 745 }
714 *p = '\0'; 746 *p = '\0';
715 747
@@ -789,7 +821,7 @@ static char *ip6_compressed_string(char *p, const char *addr)
789 if (useIPv4) { 821 if (useIPv4) {
790 if (needcolon) 822 if (needcolon)
791 *p++ = ':'; 823 *p++ = ':';
792 p = ip4_string(p, &in6.s6_addr[12], false); 824 p = ip4_string(p, &in6.s6_addr[12], "I4");
793 } 825 }
794 *p = '\0'; 826 *p = '\0';
795 827
@@ -829,7 +861,7 @@ static char *ip4_addr_string(char *buf, char *end, const u8 *addr,
829{ 861{
830 char ip4_addr[sizeof("255.255.255.255")]; 862 char ip4_addr[sizeof("255.255.255.255")];
831 863
832 ip4_string(ip4_addr, addr, fmt[0] == 'i'); 864 ip4_string(ip4_addr, addr, fmt);
833 865
834 return string(buf, end, ip4_addr, spec); 866 return string(buf, end, ip4_addr, spec);
835} 867}
@@ -896,12 +928,15 @@ static char *uuid_string(char *buf, char *end, const u8 *addr,
896 * - 'M' For a 6-byte MAC address, it prints the address in the 928 * - 'M' For a 6-byte MAC address, it prints the address in the
897 * usual colon-separated hex notation 929 * usual colon-separated hex notation
898 * - 'm' For a 6-byte MAC address, it prints the hex address without colons 930 * - 'm' For a 6-byte MAC address, it prints the hex address without colons
931 * - 'MF' For a 6-byte MAC FDDI address, it prints the address
932 * with a dash-separated hex notation
899 * - 'I' [46] for IPv4/IPv6 addresses printed in the usual way 933 * - 'I' [46] for IPv4/IPv6 addresses printed in the usual way
900 * IPv4 uses dot-separated decimal without leading 0's (1.2.3.4) 934 * IPv4 uses dot-separated decimal without leading 0's (1.2.3.4)
901 * IPv6 uses colon separated network-order 16 bit hex with leading 0's 935 * IPv6 uses colon separated network-order 16 bit hex with leading 0's
902 * - 'i' [46] for 'raw' IPv4/IPv6 addresses 936 * - 'i' [46] for 'raw' IPv4/IPv6 addresses
903 * IPv6 omits the colons (01020304...0f) 937 * IPv6 omits the colons (01020304...0f)
904 * IPv4 uses dot-separated decimal with leading 0's (010.123.045.006) 938 * IPv4 uses dot-separated decimal with leading 0's (010.123.045.006)
939 * - '[Ii]4[hnbl]' IPv4 addresses in host, network, big or little endian order
905 * - 'I6c' for IPv6 addresses printed as specified by 940 * - 'I6c' for IPv6 addresses printed as specified by
906 * http://tools.ietf.org/html/draft-ietf-6man-text-addr-representation-00 941 * http://tools.ietf.org/html/draft-ietf-6man-text-addr-representation-00
907 * - 'U' For a 16 byte UUID/GUID, it prints the UUID/GUID in the form 942 * - 'U' For a 16 byte UUID/GUID, it prints the UUID/GUID in the form
@@ -939,6 +974,7 @@ static char *pointer(const char *fmt, char *buf, char *end, void *ptr,
939 return resource_string(buf, end, ptr, spec, fmt); 974 return resource_string(buf, end, ptr, spec, fmt);
940 case 'M': /* Colon separated: 00:01:02:03:04:05 */ 975 case 'M': /* Colon separated: 00:01:02:03:04:05 */
941 case 'm': /* Contiguous: 000102030405 */ 976 case 'm': /* Contiguous: 000102030405 */
977 /* [mM]F (FDDI, bit reversed) */
942 return mac_address_string(buf, end, ptr, spec, fmt); 978 return mac_address_string(buf, end, ptr, spec, fmt);
943 case 'I': /* Formatted IP supported 979 case 'I': /* Formatted IP supported
944 * 4: 1.2.3.4 980 * 4: 1.2.3.4
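Illustrative use of the format extensions documented above: %pMF prints a 6-byte MAC in FDDI dash notation, and %pI4 gains h/n/b/l suffixes selecting the byte order of the 32-bit value being printed. The snippet is only a sketch of the expected output.

static void show_new_formats(void)
{
	u8 fddi[6] = { 0x00, 0x10, 0x18, 0xab, 0xcd, 0xef };
	u32 host_ip = 0x0a000001;		/* 10.0.0.1 in host order */
	__be32 net_ip = htonl(0x0a000001);	/* 10.0.0.1 in network order */

	printk(KERN_DEBUG "fddi: %pMF\n", fddi);	/* 00-10-18-ab-cd-ef */
	printk(KERN_DEBUG "ip:   %pI4h\n", &host_ip);	/* 10.0.0.1 */
	printk(KERN_DEBUG "ip:   %pI4n\n", &net_ip);	/* 10.0.0.1 */
}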
diff --git a/mm/mmu_context.c b/mm/mmu_context.c
index ded9081f402..0777654147c 100644
--- a/mm/mmu_context.c
+++ b/mm/mmu_context.c
@@ -5,6 +5,7 @@
5 5
6#include <linux/mm.h> 6#include <linux/mm.h>
7#include <linux/mmu_context.h> 7#include <linux/mmu_context.h>
8#include <linux/module.h>
8#include <linux/sched.h> 9#include <linux/sched.h>
9 10
10#include <asm/mmu_context.h> 11#include <asm/mmu_context.h>
@@ -37,6 +38,7 @@ void use_mm(struct mm_struct *mm)
37 if (active_mm != mm) 38 if (active_mm != mm)
38 mmdrop(active_mm); 39 mmdrop(active_mm);
39} 40}
41EXPORT_SYMBOL_GPL(use_mm);
40 42
41/* 43/*
42 * unuse_mm 44 * unuse_mm
@@ -56,3 +58,4 @@ void unuse_mm(struct mm_struct *mm)
56 enter_lazy_tlb(mm, tsk); 58 enter_lazy_tlb(mm, tsk);
57 task_unlock(tsk); 59 task_unlock(tsk);
58} 60}
61EXPORT_SYMBOL_GPL(unuse_mm);
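Exporting use_mm()/unuse_mm() lets a module's kernel thread temporarily adopt a user process's address space, the same pattern the in-kernel AIO code already uses. A hedged sketch of that pattern; the surrounding request handling is hypothetical.

static void foo_worker_copy(struct mm_struct *mm,
			    void __user *ubuf, void *kbuf, size_t len)
{
	use_mm(mm);			/* run with the user's page tables */
	if (copy_from_user(kbuf, ubuf, len))
		printk(KERN_WARNING "foo: copy from user buffer failed\n");
	unuse_mm(mm);			/* return to lazy-TLB kernel context */
}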
diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
index 33f90e7362c..453512266ea 100644
--- a/net/8021q/vlan.c
+++ b/net/8021q/vlan.c
@@ -663,7 +663,7 @@ out:
663 return err; 663 return err;
664} 664}
665 665
666static int vlan_init_net(struct net *net) 666static int __net_init vlan_init_net(struct net *net)
667{ 667{
668 struct vlan_net *vn = net_generic(net, vlan_net_id); 668 struct vlan_net *vn = net_generic(net, vlan_net_id);
669 int err; 669 int err;
@@ -675,7 +675,7 @@ static int vlan_init_net(struct net *net)
675 return err; 675 return err;
676} 676}
677 677
678static void vlan_exit_net(struct net *net) 678static void __net_exit vlan_exit_net(struct net *net)
679{ 679{
680 vlan_proc_cleanup(net); 680 vlan_proc_cleanup(net);
681} 681}
diff --git a/net/8021q/vlan_core.c b/net/8021q/vlan_core.c
index e75a2f3b10a..c0316e0ca6e 100644
--- a/net/8021q/vlan_core.c
+++ b/net/8021q/vlan_core.c
@@ -14,6 +14,7 @@ int __vlan_hwaccel_rx(struct sk_buff *skb, struct vlan_group *grp,
14 if (skb_bond_should_drop(skb)) 14 if (skb_bond_should_drop(skb))
15 goto drop; 15 goto drop;
16 16
17 skb->skb_iif = skb->dev->ifindex;
17 __vlan_hwaccel_put_tag(skb, vlan_tci); 18 __vlan_hwaccel_put_tag(skb, vlan_tci);
18 skb->dev = vlan_group_get_device(grp, vlan_tci & VLAN_VID_MASK); 19 skb->dev = vlan_group_get_device(grp, vlan_tci & VLAN_VID_MASK);
19 20
@@ -85,6 +86,7 @@ vlan_gro_common(struct napi_struct *napi, struct vlan_group *grp,
85 if (skb_bond_should_drop(skb)) 86 if (skb_bond_should_drop(skb))
86 goto drop; 87 goto drop;
87 88
89 skb->skb_iif = skb->dev->ifindex;
88 __vlan_hwaccel_put_tag(skb, vlan_tci); 90 __vlan_hwaccel_put_tag(skb, vlan_tci);
89 skb->dev = vlan_group_get_device(grp, vlan_tci & VLAN_VID_MASK); 91 skb->dev = vlan_group_get_device(grp, vlan_tci & VLAN_VID_MASK);
90 92
diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c
index c1b92cab46c..a9e1f178561 100644
--- a/net/8021q/vlan_dev.c
+++ b/net/8021q/vlan_dev.c
@@ -263,11 +263,10 @@ static int vlan_dev_hard_header(struct sk_buff *skb, struct net_device *dev,
263 vhdr->h_vlan_TCI = htons(vlan_tci); 263 vhdr->h_vlan_TCI = htons(vlan_tci);
264 264
265 /* 265 /*
266 * Set the protocol type. For a packet of type ETH_P_802_3 we 266 * Set the protocol type. For a packet of type ETH_P_802_3/2 we
267 * put the length in here instead. It is up to the 802.2 267 * put the length in here instead.
268 * layer to carry protocol information.
269 */ 268 */
270 if (type != ETH_P_802_3) 269 if (type != ETH_P_802_3 && type != ETH_P_802_2)
271 vhdr->h_vlan_encapsulated_proto = htons(type); 270 vhdr->h_vlan_encapsulated_proto = htons(type);
272 else 271 else
273 vhdr->h_vlan_encapsulated_proto = htons(len); 272 vhdr->h_vlan_encapsulated_proto = htons(len);
diff --git a/net/8021q/vlanproc.c b/net/8021q/vlanproc.c
index 9ec1f057c03..afead353e21 100644
--- a/net/8021q/vlanproc.c
+++ b/net/8021q/vlanproc.c
@@ -140,7 +140,7 @@ void vlan_proc_cleanup(struct net *net)
140 * Create /proc/net/vlan entries 140 * Create /proc/net/vlan entries
141 */ 141 */
142 142
143int vlan_proc_init(struct net *net) 143int __net_init vlan_proc_init(struct net *net)
144{ 144{
145 struct vlan_net *vn = net_generic(net, vlan_net_id); 145 struct vlan_net *vn = net_generic(net, vlan_net_id);
146 146
diff --git a/net/atm/addr.c b/net/atm/addr.c
index 82e85abc303..cf3ae8b4757 100644
--- a/net/atm/addr.c
+++ b/net/atm/addr.c
@@ -4,7 +4,7 @@
4 4
5#include <linux/atm.h> 5#include <linux/atm.h>
6#include <linux/atmdev.h> 6#include <linux/atmdev.h>
7#include <asm/uaccess.h> 7#include <linux/uaccess.h>
8 8
9#include "signaling.h" 9#include "signaling.h"
10#include "addr.h" 10#include "addr.h"
diff --git a/net/atm/atm_misc.c b/net/atm/atm_misc.c
index 02cc7e71efe..fc63526d869 100644
--- a/net/atm/atm_misc.c
+++ b/net/atm/atm_misc.c
@@ -2,37 +2,35 @@
2 2
3/* Written 1995-2000 by Werner Almesberger, EPFL ICA */ 3/* Written 1995-2000 by Werner Almesberger, EPFL ICA */
4 4
5
6#include <linux/module.h> 5#include <linux/module.h>
7#include <linux/atm.h> 6#include <linux/atm.h>
8#include <linux/atmdev.h> 7#include <linux/atmdev.h>
9#include <linux/skbuff.h> 8#include <linux/skbuff.h>
10#include <linux/sonet.h> 9#include <linux/sonet.h>
11#include <linux/bitops.h> 10#include <linux/bitops.h>
11#include <linux/errno.h>
12#include <asm/atomic.h> 12#include <asm/atomic.h>
13#include <asm/errno.h>
14
15 13
16int atm_charge(struct atm_vcc *vcc,int truesize) 14int atm_charge(struct atm_vcc *vcc, int truesize)
17{ 15{
18 atm_force_charge(vcc,truesize); 16 atm_force_charge(vcc, truesize);
19 if (atomic_read(&sk_atm(vcc)->sk_rmem_alloc) <= sk_atm(vcc)->sk_rcvbuf) 17 if (atomic_read(&sk_atm(vcc)->sk_rmem_alloc) <= sk_atm(vcc)->sk_rcvbuf)
20 return 1; 18 return 1;
21 atm_return(vcc,truesize); 19 atm_return(vcc, truesize);
22 atomic_inc(&vcc->stats->rx_drop); 20 atomic_inc(&vcc->stats->rx_drop);
23 return 0; 21 return 0;
24} 22}
23EXPORT_SYMBOL(atm_charge);
25 24
26 25struct sk_buff *atm_alloc_charge(struct atm_vcc *vcc, int pdu_size,
27struct sk_buff *atm_alloc_charge(struct atm_vcc *vcc,int pdu_size, 26 gfp_t gfp_flags)
28 gfp_t gfp_flags)
29{ 27{
30 struct sock *sk = sk_atm(vcc); 28 struct sock *sk = sk_atm(vcc);
31 int guess = atm_guess_pdu2truesize(pdu_size); 29 int guess = atm_guess_pdu2truesize(pdu_size);
32 30
33 atm_force_charge(vcc,guess); 31 atm_force_charge(vcc, guess);
34 if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf) { 32 if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf) {
35 struct sk_buff *skb = alloc_skb(pdu_size,gfp_flags); 33 struct sk_buff *skb = alloc_skb(pdu_size, gfp_flags);
36 34
37 if (skb) { 35 if (skb) {
38 atomic_add(skb->truesize-guess, 36 atomic_add(skb->truesize-guess,
@@ -40,10 +38,11 @@ struct sk_buff *atm_alloc_charge(struct atm_vcc *vcc,int pdu_size,
40 return skb; 38 return skb;
41 } 39 }
42 } 40 }
43 atm_return(vcc,guess); 41 atm_return(vcc, guess);
44 atomic_inc(&vcc->stats->rx_drop); 42 atomic_inc(&vcc->stats->rx_drop);
45 return NULL; 43 return NULL;
46} 44}
45EXPORT_SYMBOL(atm_alloc_charge);
47 46
48 47
49/* 48/*
@@ -73,7 +72,6 @@ struct sk_buff *atm_alloc_charge(struct atm_vcc *vcc,int pdu_size,
73 * else * 72 * else *
74 */ 73 */
75 74
76
77int atm_pcr_goal(const struct atm_trafprm *tp) 75int atm_pcr_goal(const struct atm_trafprm *tp)
78{ 76{
79 if (tp->pcr && tp->pcr != ATM_MAX_PCR) 77 if (tp->pcr && tp->pcr != ATM_MAX_PCR)
@@ -84,26 +82,20 @@ int atm_pcr_goal(const struct atm_trafprm *tp)
84 return -tp->max_pcr; 82 return -tp->max_pcr;
85 return 0; 83 return 0;
86} 84}
85EXPORT_SYMBOL(atm_pcr_goal);
87 86
88 87void sonet_copy_stats(struct k_sonet_stats *from, struct sonet_stats *to)
89void sonet_copy_stats(struct k_sonet_stats *from,struct sonet_stats *to)
90{ 88{
91#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i) 89#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
92 __SONET_ITEMS 90 __SONET_ITEMS
93#undef __HANDLE_ITEM 91#undef __HANDLE_ITEM
94} 92}
93EXPORT_SYMBOL(sonet_copy_stats);
95 94
96 95void sonet_subtract_stats(struct k_sonet_stats *from, struct sonet_stats *to)
97void sonet_subtract_stats(struct k_sonet_stats *from,struct sonet_stats *to)
98{ 96{
99#define __HANDLE_ITEM(i) atomic_sub(to->i,&from->i) 97#define __HANDLE_ITEM(i) atomic_sub(to->i, &from->i)
100 __SONET_ITEMS 98 __SONET_ITEMS
101#undef __HANDLE_ITEM 99#undef __HANDLE_ITEM
102} 100}
103
104
105EXPORT_SYMBOL(atm_charge);
106EXPORT_SYMBOL(atm_alloc_charge);
107EXPORT_SYMBOL(atm_pcr_goal);
108EXPORT_SYMBOL(sonet_copy_stats);
109EXPORT_SYMBOL(sonet_subtract_stats); 101EXPORT_SYMBOL(sonet_subtract_stats);
diff --git a/net/atm/atm_sysfs.c b/net/atm/atm_sysfs.c
index b5674dc2083..f693b78eb46 100644
--- a/net/atm/atm_sysfs.c
+++ b/net/atm/atm_sysfs.c
@@ -42,13 +42,14 @@ static ssize_t show_atmaddress(struct device *cdev,
42 42
43 spin_lock_irqsave(&adev->lock, flags); 43 spin_lock_irqsave(&adev->lock, flags);
44 list_for_each_entry(aaddr, &adev->local, entry) { 44 list_for_each_entry(aaddr, &adev->local, entry) {
45 for(i = 0, j = 0; i < ATM_ESA_LEN; ++i, ++j) { 45 for (i = 0, j = 0; i < ATM_ESA_LEN; ++i, ++j) {
46 if (j == *fmt) { 46 if (j == *fmt) {
47 pos += sprintf(pos, "."); 47 pos += sprintf(pos, ".");
48 ++fmt; 48 ++fmt;
49 j = 0; 49 j = 0;
50 } 50 }
51 pos += sprintf(pos, "%02x", aaddr->addr.sas_addr.prv[i]); 51 pos += sprintf(pos, "%02x",
52 aaddr->addr.sas_addr.prv[i]);
52 } 53 }
53 pos += sprintf(pos, "\n"); 54 pos += sprintf(pos, "\n");
54 } 55 }
@@ -78,17 +79,17 @@ static ssize_t show_link_rate(struct device *cdev,
78 79
79 /* show the link rate, not the data rate */ 80 /* show the link rate, not the data rate */
80 switch (adev->link_rate) { 81 switch (adev->link_rate) {
81 case ATM_OC3_PCR: 82 case ATM_OC3_PCR:
82 link_rate = 155520000; 83 link_rate = 155520000;
83 break; 84 break;
84 case ATM_OC12_PCR: 85 case ATM_OC12_PCR:
85 link_rate = 622080000; 86 link_rate = 622080000;
86 break; 87 break;
87 case ATM_25_PCR: 88 case ATM_25_PCR:
88 link_rate = 25600000; 89 link_rate = 25600000;
89 break; 90 break;
90 default: 91 default:
91 link_rate = adev->link_rate * 8 * 53; 92 link_rate = adev->link_rate * 8 * 53;
92 } 93 }
93 pos += sprintf(pos, "%d\n", link_rate); 94 pos += sprintf(pos, "%d\n", link_rate);
94 95
diff --git a/net/atm/br2684.c b/net/atm/br2684.c
index c9230c39869..4d64d87e757 100644
--- a/net/atm/br2684.c
+++ b/net/atm/br2684.c
@@ -6,6 +6,8 @@
6 * Eric Kinzie, 2006-2007, US Naval Research Laboratory 6 * Eric Kinzie, 2006-2007, US Naval Research Laboratory
7 */ 7 */
8 8
9#define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__
10
9#include <linux/module.h> 11#include <linux/module.h>
10#include <linux/init.h> 12#include <linux/init.h>
11#include <linux/kernel.h> 13#include <linux/kernel.h>
@@ -15,7 +17,7 @@
15#include <linux/etherdevice.h> 17#include <linux/etherdevice.h>
16#include <linux/rtnetlink.h> 18#include <linux/rtnetlink.h>
17#include <linux/ip.h> 19#include <linux/ip.h>
18#include <asm/uaccess.h> 20#include <linux/uaccess.h>
19#include <net/arp.h> 21#include <net/arp.h>
20#include <linux/atm.h> 22#include <linux/atm.h>
21#include <linux/atmdev.h> 23#include <linux/atmdev.h>
@@ -26,20 +28,14 @@
26 28
27#include "common.h" 29#include "common.h"
28 30
29#ifdef SKB_DEBUG
30static void skb_debug(const struct sk_buff *skb) 31static void skb_debug(const struct sk_buff *skb)
31{ 32{
33#ifdef SKB_DEBUG
32#define NUM2PRINT 50 34#define NUM2PRINT 50
33 char buf[NUM2PRINT * 3 + 1]; /* 3 chars per byte */ 35 print_hex_dump(KERN_DEBUG, "br2684: skb: ", DUMP_OFFSET,
34 int i = 0; 36 16, 1, skb->data, min(NUM2PRINT, skb->len), true);
35 for (i = 0; i < skb->len && i < NUM2PRINT; i++) {
36 sprintf(buf + i * 3, "%2.2x ", 0xff & skb->data[i]);
37 }
38 printk(KERN_DEBUG "br2684: skb: %s\n", buf);
39}
40#else
41#define skb_debug(skb) do {} while (0)
42#endif 37#endif
38}
43 39
44#define BR2684_ETHERTYPE_LEN 2 40#define BR2684_ETHERTYPE_LEN 2
45#define BR2684_PAD_LEN 2 41#define BR2684_PAD_LEN 2
@@ -68,7 +64,7 @@ struct br2684_vcc {
68 struct atm_vcc *atmvcc; 64 struct atm_vcc *atmvcc;
69 struct net_device *device; 65 struct net_device *device;
70 /* keep old push, pop functions for chaining */ 66 /* keep old push, pop functions for chaining */
71 void (*old_push) (struct atm_vcc * vcc, struct sk_buff * skb); 67 void (*old_push)(struct atm_vcc *vcc, struct sk_buff *skb);
72 void (*old_pop)(struct atm_vcc *vcc, struct sk_buff *skb); 68 void (*old_pop)(struct atm_vcc *vcc, struct sk_buff *skb);
73 enum br2684_encaps encaps; 69 enum br2684_encaps encaps;
74 struct list_head brvccs; 70 struct list_head brvccs;
@@ -148,7 +144,7 @@ static void br2684_pop(struct atm_vcc *vcc, struct sk_buff *skb)
148 struct br2684_vcc *brvcc = BR2684_VCC(vcc); 144 struct br2684_vcc *brvcc = BR2684_VCC(vcc);
149 struct net_device *net_dev = skb->dev; 145 struct net_device *net_dev = skb->dev;
150 146
151 pr_debug("br2684_pop(vcc %p ; net_dev %p )\n", vcc, net_dev); 147 pr_debug("(vcc %p ; net_dev %p )\n", vcc, net_dev);
152 brvcc->old_pop(vcc, skb); 148 brvcc->old_pop(vcc, skb);
153 149
154 if (!net_dev) 150 if (!net_dev)
@@ -244,7 +240,7 @@ static netdev_tx_t br2684_start_xmit(struct sk_buff *skb,
244 struct br2684_dev *brdev = BRPRIV(dev); 240 struct br2684_dev *brdev = BRPRIV(dev);
245 struct br2684_vcc *brvcc; 241 struct br2684_vcc *brvcc;
246 242
247 pr_debug("br2684_start_xmit, skb_dst(skb)=%p\n", skb_dst(skb)); 243 pr_debug("skb_dst(skb)=%p\n", skb_dst(skb));
248 read_lock(&devs_lock); 244 read_lock(&devs_lock);
249 brvcc = pick_outgoing_vcc(skb, brdev); 245 brvcc = pick_outgoing_vcc(skb, brdev);
250 if (brvcc == NULL) { 246 if (brvcc == NULL) {
@@ -300,7 +296,8 @@ static int br2684_setfilt(struct atm_vcc *atmvcc, void __user * arg)
300 struct br2684_dev *brdev; 296 struct br2684_dev *brdev;
301 read_lock(&devs_lock); 297 read_lock(&devs_lock);
302 brdev = BRPRIV(br2684_find_dev(&fs.ifspec)); 298 brdev = BRPRIV(br2684_find_dev(&fs.ifspec));
303 if (brdev == NULL || list_empty(&brdev->brvccs) || brdev->brvccs.next != brdev->brvccs.prev) /* >1 VCC */ 299 if (brdev == NULL || list_empty(&brdev->brvccs) ||
300 brdev->brvccs.next != brdev->brvccs.prev) /* >1 VCC */
304 brvcc = NULL; 301 brvcc = NULL;
305 else 302 else
306 brvcc = list_entry_brvcc(brdev->brvccs.next); 303 brvcc = list_entry_brvcc(brdev->brvccs.next);
@@ -352,7 +349,7 @@ static void br2684_push(struct atm_vcc *atmvcc, struct sk_buff *skb)
352 struct net_device *net_dev = brvcc->device; 349 struct net_device *net_dev = brvcc->device;
353 struct br2684_dev *brdev = BRPRIV(net_dev); 350 struct br2684_dev *brdev = BRPRIV(net_dev);
354 351
355 pr_debug("br2684_push\n"); 352 pr_debug("\n");
356 353
357 if (unlikely(skb == NULL)) { 354 if (unlikely(skb == NULL)) {
358 /* skb==NULL means VCC is being destroyed */ 355 /* skb==NULL means VCC is being destroyed */
@@ -376,29 +373,25 @@ static void br2684_push(struct atm_vcc *atmvcc, struct sk_buff *skb)
376 __skb_trim(skb, skb->len - 4); 373 __skb_trim(skb, skb->len - 4);
377 374
378 /* accept packets that have "ipv[46]" in the snap header */ 375 /* accept packets that have "ipv[46]" in the snap header */
379 if ((skb->len >= (sizeof(llc_oui_ipv4))) 376 if ((skb->len >= (sizeof(llc_oui_ipv4))) &&
380 && 377 (memcmp(skb->data, llc_oui_ipv4,
381 (memcmp 378 sizeof(llc_oui_ipv4) - BR2684_ETHERTYPE_LEN) == 0)) {
382 (skb->data, llc_oui_ipv4, 379 if (memcmp(skb->data + 6, ethertype_ipv6,
383 sizeof(llc_oui_ipv4) - BR2684_ETHERTYPE_LEN) == 0)) { 380 sizeof(ethertype_ipv6)) == 0)
384 if (memcmp
385 (skb->data + 6, ethertype_ipv6,
386 sizeof(ethertype_ipv6)) == 0)
387 skb->protocol = htons(ETH_P_IPV6); 381 skb->protocol = htons(ETH_P_IPV6);
388 else if (memcmp 382 else if (memcmp(skb->data + 6, ethertype_ipv4,
389 (skb->data + 6, ethertype_ipv4, 383 sizeof(ethertype_ipv4)) == 0)
390 sizeof(ethertype_ipv4)) == 0)
391 skb->protocol = htons(ETH_P_IP); 384 skb->protocol = htons(ETH_P_IP);
392 else 385 else
393 goto error; 386 goto error;
394 skb_pull(skb, sizeof(llc_oui_ipv4)); 387 skb_pull(skb, sizeof(llc_oui_ipv4));
395 skb_reset_network_header(skb); 388 skb_reset_network_header(skb);
396 skb->pkt_type = PACKET_HOST; 389 skb->pkt_type = PACKET_HOST;
397 /* 390 /*
398 * Let us waste some time for checking the encapsulation. 391 * Let us waste some time for checking the encapsulation.
399 * Note, that only 7 char is checked so frames with a valid FCS 392 * Note, that only 7 char is checked so frames with a valid FCS
400 * are also accepted (but FCS is not checked of course). 393 * are also accepted (but FCS is not checked of course).
401 */ 394 */
402 } else if ((skb->len >= sizeof(llc_oui_pid_pad)) && 395 } else if ((skb->len >= sizeof(llc_oui_pid_pad)) &&
403 (memcmp(skb->data, llc_oui_pid_pad, 7) == 0)) { 396 (memcmp(skb->data, llc_oui_pid_pad, 7) == 0)) {
404 skb_pull(skb, sizeof(llc_oui_pid_pad)); 397 skb_pull(skb, sizeof(llc_oui_pid_pad));
@@ -479,8 +472,7 @@ static int br2684_regvcc(struct atm_vcc *atmvcc, void __user * arg)
479 write_lock_irq(&devs_lock); 472 write_lock_irq(&devs_lock);
480 net_dev = br2684_find_dev(&be.ifspec); 473 net_dev = br2684_find_dev(&be.ifspec);
481 if (net_dev == NULL) { 474 if (net_dev == NULL) {
482 printk(KERN_ERR 475 pr_err("tried to attach to non-existant device\n");
483 "br2684: tried to attach to non-existant device\n");
484 err = -ENXIO; 476 err = -ENXIO;
485 goto error; 477 goto error;
486 } 478 }
@@ -494,17 +486,16 @@ static int br2684_regvcc(struct atm_vcc *atmvcc, void __user * arg)
494 err = -EEXIST; 486 err = -EEXIST;
495 goto error; 487 goto error;
496 } 488 }
497 if (be.fcs_in != BR2684_FCSIN_NO || be.fcs_out != BR2684_FCSOUT_NO || 489 if (be.fcs_in != BR2684_FCSIN_NO ||
498 be.fcs_auto || be.has_vpiid || be.send_padding || (be.encaps != 490 be.fcs_out != BR2684_FCSOUT_NO ||
499 BR2684_ENCAPS_VC 491 be.fcs_auto || be.has_vpiid || be.send_padding ||
500 && be.encaps != 492 (be.encaps != BR2684_ENCAPS_VC &&
501 BR2684_ENCAPS_LLC) 493 be.encaps != BR2684_ENCAPS_LLC) ||
502 || be.min_size != 0) { 494 be.min_size != 0) {
503 err = -EINVAL; 495 err = -EINVAL;
504 goto error; 496 goto error;
505 } 497 }
506 pr_debug("br2684_regvcc vcc=%p, encaps=%d, brvcc=%p\n", atmvcc, 498 pr_debug("vcc=%p, encaps=%d, brvcc=%p\n", atmvcc, be.encaps, brvcc);
507 be.encaps, brvcc);
508 if (list_empty(&brdev->brvccs) && !brdev->mac_was_set) { 499 if (list_empty(&brdev->brvccs) && !brdev->mac_was_set) {
509 unsigned char *esi = atmvcc->dev->esi; 500 unsigned char *esi = atmvcc->dev->esi;
510 if (esi[0] | esi[1] | esi[2] | esi[3] | esi[4] | esi[5]) 501 if (esi[0] | esi[1] | esi[2] | esi[3] | esi[4] | esi[5])
@@ -541,7 +532,8 @@ static int br2684_regvcc(struct atm_vcc *atmvcc, void __user * arg)
541 } 532 }
542 __module_get(THIS_MODULE); 533 __module_get(THIS_MODULE);
543 return 0; 534 return 0;
544 error: 535
536error:
545 write_unlock_irq(&devs_lock); 537 write_unlock_irq(&devs_lock);
546 kfree(brvcc); 538 kfree(brvcc);
547 return err; 539 return err;
@@ -587,7 +579,7 @@ static void br2684_setup_routed(struct net_device *netdev)
587 INIT_LIST_HEAD(&brdev->brvccs); 579 INIT_LIST_HEAD(&brdev->brvccs);
588} 580}
589 581
590static int br2684_create(void __user * arg) 582static int br2684_create(void __user *arg)
591{ 583{
592 int err; 584 int err;
593 struct net_device *netdev; 585 struct net_device *netdev;
@@ -595,11 +587,10 @@ static int br2684_create(void __user * arg)
595 struct atm_newif_br2684 ni; 587 struct atm_newif_br2684 ni;
596 enum br2684_payload payload; 588 enum br2684_payload payload;
597 589
598 pr_debug("br2684_create\n"); 590 pr_debug("\n");
599 591
600 if (copy_from_user(&ni, arg, sizeof ni)) { 592 if (copy_from_user(&ni, arg, sizeof ni))
601 return -EFAULT; 593 return -EFAULT;
602 }
603 594
604 if (ni.media & BR2684_FLAG_ROUTED) 595 if (ni.media & BR2684_FLAG_ROUTED)
605 payload = p_routed; 596 payload = p_routed;
@@ -607,9 +598,8 @@ static int br2684_create(void __user * arg)
607 payload = p_bridged; 598 payload = p_bridged;
608 ni.media &= 0xffff; /* strip flags */ 599 ni.media &= 0xffff; /* strip flags */
609 600
610 if (ni.media != BR2684_MEDIA_ETHERNET || ni.mtu != 1500) { 601 if (ni.media != BR2684_MEDIA_ETHERNET || ni.mtu != 1500)
611 return -EINVAL; 602 return -EINVAL;
612 }
613 603
614 netdev = alloc_netdev(sizeof(struct br2684_dev), 604 netdev = alloc_netdev(sizeof(struct br2684_dev),
615 ni.ifname[0] ? ni.ifname : "nas%d", 605 ni.ifname[0] ? ni.ifname : "nas%d",
@@ -624,7 +614,7 @@ static int br2684_create(void __user * arg)
624 /* open, stop, do_ioctl ? */ 614 /* open, stop, do_ioctl ? */
625 err = register_netdev(netdev); 615 err = register_netdev(netdev);
626 if (err < 0) { 616 if (err < 0) {
627 printk(KERN_ERR "br2684_create: register_netdev failed\n"); 617 pr_err("register_netdev failed\n");
628 free_netdev(netdev); 618 free_netdev(netdev);
629 return err; 619 return err;
630 } 620 }
diff --git a/net/atm/clip.c b/net/atm/clip.c
index 64629c35434..ebfa022008f 100644
--- a/net/atm/clip.c
+++ b/net/atm/clip.c
@@ -2,6 +2,8 @@
2 2
3/* Written 1995-2000 by Werner Almesberger, EPFL LRC/ICA */ 3/* Written 1995-2000 by Werner Almesberger, EPFL LRC/ICA */
4 4
5#define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__
6
5#include <linux/string.h> 7#include <linux/string.h>
6#include <linux/errno.h> 8#include <linux/errno.h>
7#include <linux/kernel.h> /* for UINT_MAX */ 9#include <linux/kernel.h> /* for UINT_MAX */
@@ -30,10 +32,10 @@
30#include <linux/jhash.h> 32#include <linux/jhash.h>
31#include <net/route.h> /* for struct rtable and routing */ 33#include <net/route.h> /* for struct rtable and routing */
32#include <net/icmp.h> /* icmp_send */ 34#include <net/icmp.h> /* icmp_send */
33#include <asm/param.h> /* for HZ */ 35#include <linux/param.h> /* for HZ */
36#include <linux/uaccess.h>
34#include <asm/byteorder.h> /* for htons etc. */ 37#include <asm/byteorder.h> /* for htons etc. */
35#include <asm/system.h> /* save/restore_flags */ 38#include <asm/system.h> /* save/restore_flags */
36#include <asm/uaccess.h>
37#include <asm/atomic.h> 39#include <asm/atomic.h>
38 40
39#include "common.h" 41#include "common.h"
@@ -51,13 +53,13 @@ static int to_atmarpd(enum atmarp_ctrl_type type, int itf, __be32 ip)
51 struct atmarp_ctrl *ctrl; 53 struct atmarp_ctrl *ctrl;
52 struct sk_buff *skb; 54 struct sk_buff *skb;
53 55
54 pr_debug("to_atmarpd(%d)\n", type); 56 pr_debug("(%d)\n", type);
55 if (!atmarpd) 57 if (!atmarpd)
56 return -EUNATCH; 58 return -EUNATCH;
57 skb = alloc_skb(sizeof(struct atmarp_ctrl),GFP_ATOMIC); 59 skb = alloc_skb(sizeof(struct atmarp_ctrl), GFP_ATOMIC);
58 if (!skb) 60 if (!skb)
59 return -ENOMEM; 61 return -ENOMEM;
60 ctrl = (struct atmarp_ctrl *) skb_put(skb,sizeof(struct atmarp_ctrl)); 62 ctrl = (struct atmarp_ctrl *)skb_put(skb, sizeof(struct atmarp_ctrl));
61 ctrl->type = type; 63 ctrl->type = type;
62 ctrl->itf_num = itf; 64 ctrl->itf_num = itf;
63 ctrl->ip = ip; 65 ctrl->ip = ip;
@@ -71,8 +73,7 @@ static int to_atmarpd(enum atmarp_ctrl_type type, int itf, __be32 ip)
71 73
72static void link_vcc(struct clip_vcc *clip_vcc, struct atmarp_entry *entry) 74static void link_vcc(struct clip_vcc *clip_vcc, struct atmarp_entry *entry)
73{ 75{
74 pr_debug("link_vcc %p to entry %p (neigh %p)\n", clip_vcc, entry, 76 pr_debug("%p to entry %p (neigh %p)\n", clip_vcc, entry, entry->neigh);
75 entry->neigh);
76 clip_vcc->entry = entry; 77 clip_vcc->entry = entry;
77 clip_vcc->xoff = 0; /* @@@ may overrun buffer by one packet */ 78 clip_vcc->xoff = 0; /* @@@ may overrun buffer by one packet */
78 clip_vcc->next = entry->vccs; 79 clip_vcc->next = entry->vccs;
@@ -86,7 +87,7 @@ static void unlink_clip_vcc(struct clip_vcc *clip_vcc)
86 struct clip_vcc **walk; 87 struct clip_vcc **walk;
87 88
88 if (!entry) { 89 if (!entry) {
89 printk(KERN_CRIT "!clip_vcc->entry (clip_vcc %p)\n", clip_vcc); 90 pr_crit("!clip_vcc->entry (clip_vcc %p)\n", clip_vcc);
90 return; 91 return;
91 } 92 }
92 netif_tx_lock_bh(entry->neigh->dev); /* block clip_start_xmit() */ 93 netif_tx_lock_bh(entry->neigh->dev); /* block clip_start_xmit() */
@@ -106,13 +107,11 @@ static void unlink_clip_vcc(struct clip_vcc *clip_vcc)
106 error = neigh_update(entry->neigh, NULL, NUD_NONE, 107 error = neigh_update(entry->neigh, NULL, NUD_NONE,
107 NEIGH_UPDATE_F_ADMIN); 108 NEIGH_UPDATE_F_ADMIN);
108 if (error) 109 if (error)
109 printk(KERN_CRIT "unlink_clip_vcc: " 110 pr_crit("neigh_update failed with %d\n", error);
110 "neigh_update failed with %d\n", error);
111 goto out; 111 goto out;
112 } 112 }
113 printk(KERN_CRIT "ATMARP: unlink_clip_vcc failed (entry %p, vcc " 113 pr_crit("ATMARP: failed (entry %p, vcc 0x%p)\n", entry, clip_vcc);
114 "0x%p)\n", entry, clip_vcc); 114out:
115 out:
116 netif_tx_unlock_bh(entry->neigh->dev); 115 netif_tx_unlock_bh(entry->neigh->dev);
117} 116}
118 117
@@ -127,7 +126,7 @@ static int neigh_check_cb(struct neighbour *n)
127 126
128 if (cv->idle_timeout && time_after(jiffies, exp)) { 127 if (cv->idle_timeout && time_after(jiffies, exp)) {
129 pr_debug("releasing vcc %p->%p of entry %p\n", 128 pr_debug("releasing vcc %p->%p of entry %p\n",
130 cv, cv->vcc, entry); 129 cv, cv->vcc, entry);
131 vcc_release_async(cv->vcc, -ETIMEDOUT); 130 vcc_release_async(cv->vcc, -ETIMEDOUT);
132 } 131 }
133 } 132 }
@@ -139,7 +138,7 @@ static int neigh_check_cb(struct neighbour *n)
139 struct sk_buff *skb; 138 struct sk_buff *skb;
140 139
141 pr_debug("destruction postponed with ref %d\n", 140 pr_debug("destruction postponed with ref %d\n",
142 atomic_read(&n->refcnt)); 141 atomic_read(&n->refcnt));
143 142
144 while ((skb = skb_dequeue(&n->arp_queue)) != NULL) 143 while ((skb = skb_dequeue(&n->arp_queue)) != NULL)
145 dev_kfree_skb(skb); 144 dev_kfree_skb(skb);
@@ -163,7 +162,7 @@ static int clip_arp_rcv(struct sk_buff *skb)
163{ 162{
164 struct atm_vcc *vcc; 163 struct atm_vcc *vcc;
165 164
166 pr_debug("clip_arp_rcv\n"); 165 pr_debug("\n");
167 vcc = ATM_SKB(skb)->vcc; 166 vcc = ATM_SKB(skb)->vcc;
168 if (!vcc || !atm_charge(vcc, skb->truesize)) { 167 if (!vcc || !atm_charge(vcc, skb->truesize)) {
169 dev_kfree_skb_any(skb); 168 dev_kfree_skb_any(skb);
@@ -188,7 +187,7 @@ static void clip_push(struct atm_vcc *vcc, struct sk_buff *skb)
188{ 187{
189 struct clip_vcc *clip_vcc = CLIP_VCC(vcc); 188 struct clip_vcc *clip_vcc = CLIP_VCC(vcc);
190 189
191 pr_debug("clip push\n"); 190 pr_debug("\n");
192 if (!skb) { 191 if (!skb) {
193 pr_debug("removing VCC %p\n", clip_vcc); 192 pr_debug("removing VCC %p\n", clip_vcc);
194 if (clip_vcc->entry) 193 if (clip_vcc->entry)
@@ -206,12 +205,12 @@ static void clip_push(struct atm_vcc *vcc, struct sk_buff *skb)
206 } 205 }
207 ATM_SKB(skb)->vcc = vcc; 206 ATM_SKB(skb)->vcc = vcc;
208 skb_reset_mac_header(skb); 207 skb_reset_mac_header(skb);
209 if (!clip_vcc->encap 208 if (!clip_vcc->encap ||
210 || skb->len < RFC1483LLC_LEN 209 skb->len < RFC1483LLC_LEN ||
211 || memcmp(skb->data, llc_oui, sizeof (llc_oui))) 210 memcmp(skb->data, llc_oui, sizeof(llc_oui)))
212 skb->protocol = htons(ETH_P_IP); 211 skb->protocol = htons(ETH_P_IP);
213 else { 212 else {
214 skb->protocol = ((__be16 *) skb->data)[3]; 213 skb->protocol = ((__be16 *)skb->data)[3];
215 skb_pull(skb, RFC1483LLC_LEN); 214 skb_pull(skb, RFC1483LLC_LEN);
216 if (skb->protocol == htons(ETH_P_ARP)) { 215 if (skb->protocol == htons(ETH_P_ARP)) {
217 skb->dev->stats.rx_packets++; 216 skb->dev->stats.rx_packets++;
@@ -239,7 +238,7 @@ static void clip_pop(struct atm_vcc *vcc, struct sk_buff *skb)
239 int old; 238 int old;
240 unsigned long flags; 239 unsigned long flags;
241 240
242 pr_debug("clip_pop(vcc %p)\n", vcc); 241 pr_debug("(vcc %p)\n", vcc);
243 clip_vcc->old_pop(vcc, skb); 242 clip_vcc->old_pop(vcc, skb);
244 /* skb->dev == NULL in outbound ARP packets */ 243 /* skb->dev == NULL in outbound ARP packets */
245 if (!dev) 244 if (!dev)
@@ -255,7 +254,7 @@ static void clip_pop(struct atm_vcc *vcc, struct sk_buff *skb)
255 254
256static void clip_neigh_solicit(struct neighbour *neigh, struct sk_buff *skb) 255static void clip_neigh_solicit(struct neighbour *neigh, struct sk_buff *skb)
257{ 256{
258 pr_debug("clip_neigh_solicit (neigh %p, skb %p)\n", neigh, skb); 257 pr_debug("(neigh %p, skb %p)\n", neigh, skb);
259 to_atmarpd(act_need, PRIV(neigh->dev)->number, NEIGH2ENTRY(neigh)->ip); 258 to_atmarpd(act_need, PRIV(neigh->dev)->number, NEIGH2ENTRY(neigh)->ip);
260} 259}
261 260
@@ -284,7 +283,7 @@ static int clip_constructor(struct neighbour *neigh)
284 struct in_device *in_dev; 283 struct in_device *in_dev;
285 struct neigh_parms *parms; 284 struct neigh_parms *parms;
286 285
287 pr_debug("clip_constructor (neigh %p, entry %p)\n", neigh, entry); 286 pr_debug("(neigh %p, entry %p)\n", neigh, entry);
288 neigh->type = inet_addr_type(&init_net, entry->ip); 287 neigh->type = inet_addr_type(&init_net, entry->ip);
289 if (neigh->type != RTN_UNICAST) 288 if (neigh->type != RTN_UNICAST)
290 return -EINVAL; 289 return -EINVAL;
@@ -369,9 +368,9 @@ static netdev_tx_t clip_start_xmit(struct sk_buff *skb,
369 int old; 368 int old;
370 unsigned long flags; 369 unsigned long flags;
371 370
372 pr_debug("clip_start_xmit (skb %p)\n", skb); 371 pr_debug("(skb %p)\n", skb);
373 if (!skb_dst(skb)) { 372 if (!skb_dst(skb)) {
374 printk(KERN_ERR "clip_start_xmit: skb_dst(skb) == NULL\n"); 373 pr_err("skb_dst(skb) == NULL\n");
375 dev_kfree_skb(skb); 374 dev_kfree_skb(skb);
376 dev->stats.tx_dropped++; 375 dev->stats.tx_dropped++;
377 return NETDEV_TX_OK; 376 return NETDEV_TX_OK;
@@ -385,7 +384,7 @@ static netdev_tx_t clip_start_xmit(struct sk_buff *skb,
385 return 0; 384 return 0;
386 } 385 }
387#endif 386#endif
388 printk(KERN_ERR "clip_start_xmit: NO NEIGHBOUR !\n"); 387 pr_err("NO NEIGHBOUR !\n");
389 dev_kfree_skb(skb); 388 dev_kfree_skb(skb);
390 dev->stats.tx_dropped++; 389 dev->stats.tx_dropped++;
391 return NETDEV_TX_OK; 390 return NETDEV_TX_OK;
@@ -421,7 +420,7 @@ static netdev_tx_t clip_start_xmit(struct sk_buff *skb,
421 pr_debug("atm_skb(%p)->vcc(%p)->dev(%p)\n", skb, vcc, vcc->dev); 420 pr_debug("atm_skb(%p)->vcc(%p)->dev(%p)\n", skb, vcc, vcc->dev);
422 old = xchg(&entry->vccs->xoff, 1); /* assume XOFF ... */ 421 old = xchg(&entry->vccs->xoff, 1); /* assume XOFF ... */
423 if (old) { 422 if (old) {
424 printk(KERN_WARNING "clip_start_xmit: XOFF->XOFF transition\n"); 423 pr_warning("XOFF->XOFF transition\n");
425 return NETDEV_TX_OK; 424 return NETDEV_TX_OK;
426 } 425 }
427 dev->stats.tx_packets++; 426 dev->stats.tx_packets++;
@@ -456,7 +455,7 @@ static int clip_mkip(struct atm_vcc *vcc, int timeout)
456 clip_vcc = kmalloc(sizeof(struct clip_vcc), GFP_KERNEL); 455 clip_vcc = kmalloc(sizeof(struct clip_vcc), GFP_KERNEL);
457 if (!clip_vcc) 456 if (!clip_vcc)
458 return -ENOMEM; 457 return -ENOMEM;
459 pr_debug("mkip clip_vcc %p vcc %p\n", clip_vcc, vcc); 458 pr_debug("%p vcc %p\n", clip_vcc, vcc);
460 clip_vcc->vcc = vcc; 459 clip_vcc->vcc = vcc;
461 vcc->user_back = clip_vcc; 460 vcc->user_back = clip_vcc;
462 set_bit(ATM_VF_IS_CLIP, &vcc->flags); 461 set_bit(ATM_VF_IS_CLIP, &vcc->flags);
@@ -506,16 +505,16 @@ static int clip_setentry(struct atm_vcc *vcc, __be32 ip)
506 struct rtable *rt; 505 struct rtable *rt;
507 506
508 if (vcc->push != clip_push) { 507 if (vcc->push != clip_push) {
509 printk(KERN_WARNING "clip_setentry: non-CLIP VCC\n"); 508 pr_warning("non-CLIP VCC\n");
510 return -EBADF; 509 return -EBADF;
511 } 510 }
512 clip_vcc = CLIP_VCC(vcc); 511 clip_vcc = CLIP_VCC(vcc);
513 if (!ip) { 512 if (!ip) {
514 if (!clip_vcc->entry) { 513 if (!clip_vcc->entry) {
515 printk(KERN_ERR "hiding hidden ATMARP entry\n"); 514 pr_err("hiding hidden ATMARP entry\n");
516 return 0; 515 return 0;
517 } 516 }
518 pr_debug("setentry: remove\n"); 517 pr_debug("remove\n");
519 unlink_clip_vcc(clip_vcc); 518 unlink_clip_vcc(clip_vcc);
520 return 0; 519 return 0;
521 } 520 }
@@ -529,9 +528,9 @@ static int clip_setentry(struct atm_vcc *vcc, __be32 ip)
529 entry = NEIGH2ENTRY(neigh); 528 entry = NEIGH2ENTRY(neigh);
530 if (entry != clip_vcc->entry) { 529 if (entry != clip_vcc->entry) {
531 if (!clip_vcc->entry) 530 if (!clip_vcc->entry)
532 pr_debug("setentry: add\n"); 531 pr_debug("add\n");
533 else { 532 else {
534 pr_debug("setentry: update\n"); 533 pr_debug("update\n");
535 unlink_clip_vcc(clip_vcc); 534 unlink_clip_vcc(clip_vcc);
536 } 535 }
537 link_vcc(clip_vcc, entry); 536 link_vcc(clip_vcc, entry);
@@ -614,16 +613,16 @@ static int clip_device_event(struct notifier_block *this, unsigned long event,
614 613
615 switch (event) { 614 switch (event) {
616 case NETDEV_UP: 615 case NETDEV_UP:
617 pr_debug("clip_device_event NETDEV_UP\n"); 616 pr_debug("NETDEV_UP\n");
618 to_atmarpd(act_up, PRIV(dev)->number, 0); 617 to_atmarpd(act_up, PRIV(dev)->number, 0);
619 break; 618 break;
620 case NETDEV_GOING_DOWN: 619 case NETDEV_GOING_DOWN:
621 pr_debug("clip_device_event NETDEV_DOWN\n"); 620 pr_debug("NETDEV_DOWN\n");
622 to_atmarpd(act_down, PRIV(dev)->number, 0); 621 to_atmarpd(act_down, PRIV(dev)->number, 0);
623 break; 622 break;
624 case NETDEV_CHANGE: 623 case NETDEV_CHANGE:
625 case NETDEV_CHANGEMTU: 624 case NETDEV_CHANGEMTU:
626 pr_debug("clip_device_event NETDEV_CHANGE*\n"); 625 pr_debug("NETDEV_CHANGE*\n");
627 to_atmarpd(act_change, PRIV(dev)->number, 0); 626 to_atmarpd(act_change, PRIV(dev)->number, 0);
628 break; 627 break;
629 } 628 }
@@ -645,7 +644,6 @@ static int clip_inet_event(struct notifier_block *this, unsigned long event,
645 return clip_device_event(this, NETDEV_CHANGE, in_dev->dev); 644 return clip_device_event(this, NETDEV_CHANGE, in_dev->dev);
646} 645}
647 646
648
649static struct notifier_block clip_dev_notifier = { 647static struct notifier_block clip_dev_notifier = {
650 .notifier_call = clip_device_event, 648 .notifier_call = clip_device_event,
651}; 649};
@@ -660,7 +658,7 @@ static struct notifier_block clip_inet_notifier = {
660 658
661static void atmarpd_close(struct atm_vcc *vcc) 659static void atmarpd_close(struct atm_vcc *vcc)
662{ 660{
663 pr_debug("atmarpd_close\n"); 661 pr_debug("\n");
664 662
665 rtnl_lock(); 663 rtnl_lock();
666 atmarpd = NULL; 664 atmarpd = NULL;
@@ -671,7 +669,6 @@ static void atmarpd_close(struct atm_vcc *vcc)
671 module_put(THIS_MODULE); 669 module_put(THIS_MODULE);
672} 670}
673 671
674
675static struct atmdev_ops atmarpd_dev_ops = { 672static struct atmdev_ops atmarpd_dev_ops = {
676 .close = atmarpd_close 673 .close = atmarpd_close
677}; 674};
@@ -693,11 +690,11 @@ static int atm_init_atmarp(struct atm_vcc *vcc)
693 return -EADDRINUSE; 690 return -EADDRINUSE;
694 } 691 }
695 692
696 mod_timer(&idle_timer, jiffies+CLIP_CHECK_INTERVAL*HZ); 693 mod_timer(&idle_timer, jiffies + CLIP_CHECK_INTERVAL * HZ);
697 694
698 atmarpd = vcc; 695 atmarpd = vcc;
699 set_bit(ATM_VF_META,&vcc->flags); 696 set_bit(ATM_VF_META, &vcc->flags);
700 set_bit(ATM_VF_READY,&vcc->flags); 697 set_bit(ATM_VF_READY, &vcc->flags);
701 /* allow replies and avoid getting closed if signaling dies */ 698 /* allow replies and avoid getting closed if signaling dies */
702 vcc->dev = &atmarpd_dev; 699 vcc->dev = &atmarpd_dev;
703 vcc_insert_socket(sk_atm(vcc)); 700 vcc_insert_socket(sk_atm(vcc));
@@ -950,8 +947,7 @@ static int __init atm_clip_init(void)
950 947
951 p = proc_create("arp", S_IRUGO, atm_proc_root, &arp_seq_fops); 948 p = proc_create("arp", S_IRUGO, atm_proc_root, &arp_seq_fops);
952 if (!p) { 949 if (!p) {
953 printk(KERN_ERR "Unable to initialize " 950 pr_err("Unable to initialize /proc/net/atm/arp\n");
954 "/proc/net/atm/arp\n");
955 atm_clip_exit_noproc(); 951 atm_clip_exit_noproc();
956 return -ENOMEM; 952 return -ENOMEM;
957 } 953 }
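
The clip.c hunks above follow the same pattern introduced for br2684.c: a single pr_fmt() definition at the top of the file supplies a "module:function:" prefix, so the converted pr_debug()/pr_err() calls can drop their hand-written "clip_push"/"to_atmarpd"-style prefixes without losing information. Below is a minimal standalone userspace sketch of that macro composition; it is an illustration only and not part of the patch, and the printf-based pr_debug stand-in is an assumption (the real kernel wrapper expands pr_fmt() inside printk and may route through dynamic debug).

#include <stdio.h>

#define KBUILD_MODNAME "clip"
#define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__

/* simplified stand-in for the kernel's pr_debug(); the real one uses printk */
#define pr_debug(fmt, ...) printf(pr_fmt(fmt), ##__VA_ARGS__)

static int to_atmarpd(int type)
{
        pr_debug("(%d)\n", type);       /* prints "clip:to_atmarpd: (4)" */
        return 0;
}

int main(void)
{
        return to_atmarpd(4);
}
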
diff --git a/net/atm/common.c b/net/atm/common.c
index d61e051e0a3..74d095a081e 100644
--- a/net/atm/common.c
+++ b/net/atm/common.c
@@ -2,6 +2,7 @@
2 2
3/* Written 1995-2000 by Werner Almesberger, EPFL LRC/ICA */ 3/* Written 1995-2000 by Werner Almesberger, EPFL LRC/ICA */
4 4
5#define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__
5 6
6#include <linux/module.h> 7#include <linux/module.h>
7#include <linux/kmod.h> 8#include <linux/kmod.h>
@@ -18,11 +19,10 @@
18#include <linux/bitops.h> 19#include <linux/bitops.h>
19#include <linux/init.h> 20#include <linux/init.h>
20#include <net/sock.h> /* struct sock */ 21#include <net/sock.h> /* struct sock */
22#include <linux/uaccess.h>
23#include <linux/poll.h>
21 24
22#include <asm/uaccess.h>
23#include <asm/atomic.h> 25#include <asm/atomic.h>
24#include <asm/poll.h>
25
26 26
27#include "resources.h" /* atm_find_dev */ 27#include "resources.h" /* atm_find_dev */
28#include "common.h" /* prototypes */ 28#include "common.h" /* prototypes */
@@ -31,13 +31,15 @@
31#include "signaling.h" /* for WAITING and sigd_attach */ 31#include "signaling.h" /* for WAITING and sigd_attach */
32 32
33struct hlist_head vcc_hash[VCC_HTABLE_SIZE]; 33struct hlist_head vcc_hash[VCC_HTABLE_SIZE];
34EXPORT_SYMBOL(vcc_hash);
35
34DEFINE_RWLOCK(vcc_sklist_lock); 36DEFINE_RWLOCK(vcc_sklist_lock);
37EXPORT_SYMBOL(vcc_sklist_lock);
35 38
36static void __vcc_insert_socket(struct sock *sk) 39static void __vcc_insert_socket(struct sock *sk)
37{ 40{
38 struct atm_vcc *vcc = atm_sk(sk); 41 struct atm_vcc *vcc = atm_sk(sk);
39 struct hlist_head *head = &vcc_hash[vcc->vci & 42 struct hlist_head *head = &vcc_hash[vcc->vci & (VCC_HTABLE_SIZE - 1)];
40 (VCC_HTABLE_SIZE - 1)];
41 sk->sk_hash = vcc->vci & (VCC_HTABLE_SIZE - 1); 43 sk->sk_hash = vcc->vci & (VCC_HTABLE_SIZE - 1);
42 sk_add_node(sk, head); 44 sk_add_node(sk, head);
43} 45}
@@ -48,6 +50,7 @@ void vcc_insert_socket(struct sock *sk)
48 __vcc_insert_socket(sk); 50 __vcc_insert_socket(sk);
49 write_unlock_irq(&vcc_sklist_lock); 51 write_unlock_irq(&vcc_sklist_lock);
50} 52}
53EXPORT_SYMBOL(vcc_insert_socket);
51 54
52static void vcc_remove_socket(struct sock *sk) 55static void vcc_remove_socket(struct sock *sk)
53{ 56{
@@ -56,37 +59,32 @@ static void vcc_remove_socket(struct sock *sk)
56 write_unlock_irq(&vcc_sklist_lock); 59 write_unlock_irq(&vcc_sklist_lock);
57} 60}
58 61
59 62static struct sk_buff *alloc_tx(struct atm_vcc *vcc, unsigned int size)
60static struct sk_buff *alloc_tx(struct atm_vcc *vcc,unsigned int size)
61{ 63{
62 struct sk_buff *skb; 64 struct sk_buff *skb;
63 struct sock *sk = sk_atm(vcc); 65 struct sock *sk = sk_atm(vcc);
64 66
65 if (sk_wmem_alloc_get(sk) && !atm_may_send(vcc, size)) { 67 if (sk_wmem_alloc_get(sk) && !atm_may_send(vcc, size)) {
66 pr_debug("Sorry: wmem_alloc = %d, size = %d, sndbuf = %d\n", 68 pr_debug("Sorry: wmem_alloc = %d, size = %d, sndbuf = %d\n",
67 sk_wmem_alloc_get(sk), size, 69 sk_wmem_alloc_get(sk), size, sk->sk_sndbuf);
68 sk->sk_sndbuf);
69 return NULL; 70 return NULL;
70 } 71 }
71 while (!(skb = alloc_skb(size, GFP_KERNEL))) 72 while (!(skb = alloc_skb(size, GFP_KERNEL)))
72 schedule(); 73 schedule();
73 pr_debug("AlTx %d += %d\n", sk_wmem_alloc_get(sk), skb->truesize); 74 pr_debug("%d += %d\n", sk_wmem_alloc_get(sk), skb->truesize);
74 atomic_add(skb->truesize, &sk->sk_wmem_alloc); 75 atomic_add(skb->truesize, &sk->sk_wmem_alloc);
75 return skb; 76 return skb;
76} 77}
77 78
78
79EXPORT_SYMBOL(vcc_hash);
80EXPORT_SYMBOL(vcc_sklist_lock);
81EXPORT_SYMBOL(vcc_insert_socket);
82
83static void vcc_sock_destruct(struct sock *sk) 79static void vcc_sock_destruct(struct sock *sk)
84{ 80{
85 if (atomic_read(&sk->sk_rmem_alloc)) 81 if (atomic_read(&sk->sk_rmem_alloc))
86 printk(KERN_DEBUG "vcc_sock_destruct: rmem leakage (%d bytes) detected.\n", atomic_read(&sk->sk_rmem_alloc)); 82 printk(KERN_DEBUG "%s: rmem leakage (%d bytes) detected.\n",
83 __func__, atomic_read(&sk->sk_rmem_alloc));
87 84
88 if (atomic_read(&sk->sk_wmem_alloc)) 85 if (atomic_read(&sk->sk_wmem_alloc))
89 printk(KERN_DEBUG "vcc_sock_destruct: wmem leakage (%d bytes) detected.\n", atomic_read(&sk->sk_wmem_alloc)); 86 printk(KERN_DEBUG "%s: wmem leakage (%d bytes) detected.\n",
87 __func__, atomic_read(&sk->sk_wmem_alloc));
90} 88}
91 89
92static void vcc_def_wakeup(struct sock *sk) 90static void vcc_def_wakeup(struct sock *sk)
@@ -142,8 +140,8 @@ int vcc_create(struct net *net, struct socket *sock, int protocol, int family)
142 140
143 vcc = atm_sk(sk); 141 vcc = atm_sk(sk);
144 vcc->dev = NULL; 142 vcc->dev = NULL;
145 memset(&vcc->local,0,sizeof(struct sockaddr_atmsvc)); 143 memset(&vcc->local, 0, sizeof(struct sockaddr_atmsvc));
146 memset(&vcc->remote,0,sizeof(struct sockaddr_atmsvc)); 144 memset(&vcc->remote, 0, sizeof(struct sockaddr_atmsvc));
147 vcc->qos.txtp.max_sdu = 1 << 16; /* for meta VCs */ 145 vcc->qos.txtp.max_sdu = 1 << 16; /* for meta VCs */
148 atomic_set(&sk->sk_wmem_alloc, 1); 146 atomic_set(&sk->sk_wmem_alloc, 1);
149 atomic_set(&sk->sk_rmem_alloc, 0); 147 atomic_set(&sk->sk_rmem_alloc, 0);
@@ -156,7 +154,6 @@ int vcc_create(struct net *net, struct socket *sock, int protocol, int family)
156 return 0; 154 return 0;
157} 155}
158 156
159
160static void vcc_destroy_socket(struct sock *sk) 157static void vcc_destroy_socket(struct sock *sk)
161{ 158{
162 struct atm_vcc *vcc = atm_sk(sk); 159 struct atm_vcc *vcc = atm_sk(sk);
@@ -171,7 +168,7 @@ static void vcc_destroy_socket(struct sock *sk)
171 vcc->push(vcc, NULL); /* atmarpd has no push */ 168 vcc->push(vcc, NULL); /* atmarpd has no push */
172 169
173 while ((skb = skb_dequeue(&sk->sk_receive_queue)) != NULL) { 170 while ((skb = skb_dequeue(&sk->sk_receive_queue)) != NULL) {
174 atm_return(vcc,skb->truesize); 171 atm_return(vcc, skb->truesize);
175 kfree_skb(skb); 172 kfree_skb(skb);
176 } 173 }
177 174
@@ -182,7 +179,6 @@ static void vcc_destroy_socket(struct sock *sk)
182 vcc_remove_socket(sk); 179 vcc_remove_socket(sk);
183} 180}
184 181
185
186int vcc_release(struct socket *sock) 182int vcc_release(struct socket *sock)
187{ 183{
188 struct sock *sk = sock->sk; 184 struct sock *sk = sock->sk;
@@ -197,7 +193,6 @@ int vcc_release(struct socket *sock)
197 return 0; 193 return 0;
198} 194}
199 195
200
201void vcc_release_async(struct atm_vcc *vcc, int reply) 196void vcc_release_async(struct atm_vcc *vcc, int reply)
202{ 197{
203 struct sock *sk = sk_atm(vcc); 198 struct sock *sk = sk_atm(vcc);
@@ -208,8 +203,6 @@ void vcc_release_async(struct atm_vcc *vcc, int reply)
208 clear_bit(ATM_VF_WAITING, &vcc->flags); 203 clear_bit(ATM_VF_WAITING, &vcc->flags);
209 sk->sk_state_change(sk); 204 sk->sk_state_change(sk);
210} 205}
211
212
213EXPORT_SYMBOL(vcc_release_async); 206EXPORT_SYMBOL(vcc_release_async);
214 207
215 208
@@ -235,37 +228,37 @@ void atm_dev_release_vccs(struct atm_dev *dev)
235 write_unlock_irq(&vcc_sklist_lock); 228 write_unlock_irq(&vcc_sklist_lock);
236} 229}
237 230
238 231static int adjust_tp(struct atm_trafprm *tp, unsigned char aal)
239static int adjust_tp(struct atm_trafprm *tp,unsigned char aal)
240{ 232{
241 int max_sdu; 233 int max_sdu;
242 234
243 if (!tp->traffic_class) return 0; 235 if (!tp->traffic_class)
236 return 0;
244 switch (aal) { 237 switch (aal) {
245 case ATM_AAL0: 238 case ATM_AAL0:
246 max_sdu = ATM_CELL_SIZE-1; 239 max_sdu = ATM_CELL_SIZE-1;
247 break; 240 break;
248 case ATM_AAL34: 241 case ATM_AAL34:
249 max_sdu = ATM_MAX_AAL34_PDU; 242 max_sdu = ATM_MAX_AAL34_PDU;
250 break; 243 break;
251 default: 244 default:
252 printk(KERN_WARNING "ATM: AAL problems ... " 245 pr_warning("AAL problems ... (%d)\n", aal);
253 "(%d)\n",aal); 246 /* fall through */
254 /* fall through */ 247 case ATM_AAL5:
255 case ATM_AAL5: 248 max_sdu = ATM_MAX_AAL5_PDU;
256 max_sdu = ATM_MAX_AAL5_PDU;
257 } 249 }
258 if (!tp->max_sdu) tp->max_sdu = max_sdu; 250 if (!tp->max_sdu)
259 else if (tp->max_sdu > max_sdu) return -EINVAL; 251 tp->max_sdu = max_sdu;
260 if (!tp->max_cdv) tp->max_cdv = ATM_MAX_CDV; 252 else if (tp->max_sdu > max_sdu)
253 return -EINVAL;
254 if (!tp->max_cdv)
255 tp->max_cdv = ATM_MAX_CDV;
261 return 0; 256 return 0;
262} 257}
263 258
264
265static int check_ci(const struct atm_vcc *vcc, short vpi, int vci) 259static int check_ci(const struct atm_vcc *vcc, short vpi, int vci)
266{ 260{
267 struct hlist_head *head = &vcc_hash[vci & 261 struct hlist_head *head = &vcc_hash[vci & (VCC_HTABLE_SIZE - 1)];
268 (VCC_HTABLE_SIZE - 1)];
269 struct hlist_node *node; 262 struct hlist_node *node;
270 struct sock *s; 263 struct sock *s;
271 struct atm_vcc *walk; 264 struct atm_vcc *walk;
@@ -289,7 +282,6 @@ static int check_ci(const struct atm_vcc *vcc, short vpi, int vci)
289 return 0; 282 return 0;
290} 283}
291 284
292
293static int find_ci(const struct atm_vcc *vcc, short *vpi, int *vci) 285static int find_ci(const struct atm_vcc *vcc, short *vpi, int *vci)
294{ 286{
295 static short p; /* poor man's per-device cache */ 287 static short p; /* poor man's per-device cache */
@@ -327,14 +319,13 @@ static int find_ci(const struct atm_vcc *vcc, short *vpi, int *vci)
327 if ((c == ATM_NOT_RSV_VCI || *vci != ATM_VCI_ANY) && 319 if ((c == ATM_NOT_RSV_VCI || *vci != ATM_VCI_ANY) &&
328 *vpi == ATM_VPI_ANY) { 320 *vpi == ATM_VPI_ANY) {
329 p++; 321 p++;
330 if (p >= 1 << vcc->dev->ci_range.vpi_bits) p = 0; 322 if (p >= 1 << vcc->dev->ci_range.vpi_bits)
323 p = 0;
331 } 324 }
332 } 325 } while (old_p != p || old_c != c);
333 while (old_p != p || old_c != c);
334 return -EADDRINUSE; 326 return -EADDRINUSE;
335} 327}
336 328
337
338static int __vcc_connect(struct atm_vcc *vcc, struct atm_dev *dev, short vpi, 329static int __vcc_connect(struct atm_vcc *vcc, struct atm_dev *dev, short vpi,
339 int vci) 330 int vci)
340{ 331{
@@ -362,37 +353,46 @@ static int __vcc_connect(struct atm_vcc *vcc, struct atm_dev *dev, short vpi,
362 __vcc_insert_socket(sk); 353 __vcc_insert_socket(sk);
363 write_unlock_irq(&vcc_sklist_lock); 354 write_unlock_irq(&vcc_sklist_lock);
364 switch (vcc->qos.aal) { 355 switch (vcc->qos.aal) {
365 case ATM_AAL0: 356 case ATM_AAL0:
366 error = atm_init_aal0(vcc); 357 error = atm_init_aal0(vcc);
367 vcc->stats = &dev->stats.aal0; 358 vcc->stats = &dev->stats.aal0;
368 break; 359 break;
369 case ATM_AAL34: 360 case ATM_AAL34:
370 error = atm_init_aal34(vcc); 361 error = atm_init_aal34(vcc);
371 vcc->stats = &dev->stats.aal34; 362 vcc->stats = &dev->stats.aal34;
372 break; 363 break;
373 case ATM_NO_AAL: 364 case ATM_NO_AAL:
374 /* ATM_AAL5 is also used in the "0 for default" case */ 365 /* ATM_AAL5 is also used in the "0 for default" case */
375 vcc->qos.aal = ATM_AAL5; 366 vcc->qos.aal = ATM_AAL5;
376 /* fall through */ 367 /* fall through */
377 case ATM_AAL5: 368 case ATM_AAL5:
378 error = atm_init_aal5(vcc); 369 error = atm_init_aal5(vcc);
379 vcc->stats = &dev->stats.aal5; 370 vcc->stats = &dev->stats.aal5;
380 break; 371 break;
381 default: 372 default:
382 error = -EPROTOTYPE; 373 error = -EPROTOTYPE;
383 } 374 }
384 if (!error) error = adjust_tp(&vcc->qos.txtp,vcc->qos.aal); 375 if (!error)
385 if (!error) error = adjust_tp(&vcc->qos.rxtp,vcc->qos.aal); 376 error = adjust_tp(&vcc->qos.txtp, vcc->qos.aal);
377 if (!error)
378 error = adjust_tp(&vcc->qos.rxtp, vcc->qos.aal);
386 if (error) 379 if (error)
387 goto fail; 380 goto fail;
388 pr_debug("VCC %d.%d, AAL %d\n",vpi,vci,vcc->qos.aal); 381 pr_debug("VCC %d.%d, AAL %d\n", vpi, vci, vcc->qos.aal);
389 pr_debug(" TX: %d, PCR %d..%d, SDU %d\n",vcc->qos.txtp.traffic_class, 382 pr_debug(" TX: %d, PCR %d..%d, SDU %d\n",
390 vcc->qos.txtp.min_pcr,vcc->qos.txtp.max_pcr,vcc->qos.txtp.max_sdu); 383 vcc->qos.txtp.traffic_class,
391 pr_debug(" RX: %d, PCR %d..%d, SDU %d\n",vcc->qos.rxtp.traffic_class, 384 vcc->qos.txtp.min_pcr,
392 vcc->qos.rxtp.min_pcr,vcc->qos.rxtp.max_pcr,vcc->qos.rxtp.max_sdu); 385 vcc->qos.txtp.max_pcr,
386 vcc->qos.txtp.max_sdu);
387 pr_debug(" RX: %d, PCR %d..%d, SDU %d\n",
388 vcc->qos.rxtp.traffic_class,
389 vcc->qos.rxtp.min_pcr,
390 vcc->qos.rxtp.max_pcr,
391 vcc->qos.rxtp.max_sdu);
393 392
394 if (dev->ops->open) { 393 if (dev->ops->open) {
395 if ((error = dev->ops->open(vcc))) 394 error = dev->ops->open(vcc);
395 if (error)
396 goto fail; 396 goto fail;
397 } 397 }
398 return 0; 398 return 0;
@@ -406,14 +406,13 @@ fail_module_put:
406 return error; 406 return error;
407} 407}
408 408
409
410int vcc_connect(struct socket *sock, int itf, short vpi, int vci) 409int vcc_connect(struct socket *sock, int itf, short vpi, int vci)
411{ 410{
412 struct atm_dev *dev; 411 struct atm_dev *dev;
413 struct atm_vcc *vcc = ATM_SD(sock); 412 struct atm_vcc *vcc = ATM_SD(sock);
414 int error; 413 int error;
415 414
416 pr_debug("vcc_connect (vpi %d, vci %d)\n",vpi,vci); 415 pr_debug("(vpi %d, vci %d)\n", vpi, vci);
417 if (sock->state == SS_CONNECTED) 416 if (sock->state == SS_CONNECTED)
418 return -EISCONN; 417 return -EISCONN;
419 if (sock->state != SS_UNCONNECTED) 418 if (sock->state != SS_UNCONNECTED)
@@ -422,30 +421,33 @@ int vcc_connect(struct socket *sock, int itf, short vpi, int vci)
422 return -EINVAL; 421 return -EINVAL;
423 422
424 if (vpi != ATM_VPI_UNSPEC && vci != ATM_VCI_UNSPEC) 423 if (vpi != ATM_VPI_UNSPEC && vci != ATM_VCI_UNSPEC)
425 clear_bit(ATM_VF_PARTIAL,&vcc->flags); 424 clear_bit(ATM_VF_PARTIAL, &vcc->flags);
426 else 425 else
427 if (test_bit(ATM_VF_PARTIAL,&vcc->flags)) 426 if (test_bit(ATM_VF_PARTIAL, &vcc->flags))
428 return -EINVAL; 427 return -EINVAL;
429 pr_debug("vcc_connect (TX: cl %d,bw %d-%d,sdu %d; " 428 pr_debug("(TX: cl %d,bw %d-%d,sdu %d; "
430 "RX: cl %d,bw %d-%d,sdu %d,AAL %s%d)\n", 429 "RX: cl %d,bw %d-%d,sdu %d,AAL %s%d)\n",
431 vcc->qos.txtp.traffic_class,vcc->qos.txtp.min_pcr, 430 vcc->qos.txtp.traffic_class, vcc->qos.txtp.min_pcr,
432 vcc->qos.txtp.max_pcr,vcc->qos.txtp.max_sdu, 431 vcc->qos.txtp.max_pcr, vcc->qos.txtp.max_sdu,
433 vcc->qos.rxtp.traffic_class,vcc->qos.rxtp.min_pcr, 432 vcc->qos.rxtp.traffic_class, vcc->qos.rxtp.min_pcr,
434 vcc->qos.rxtp.max_pcr,vcc->qos.rxtp.max_sdu, 433 vcc->qos.rxtp.max_pcr, vcc->qos.rxtp.max_sdu,
435 vcc->qos.aal == ATM_AAL5 ? "" : vcc->qos.aal == ATM_AAL0 ? "" : 434 vcc->qos.aal == ATM_AAL5 ? "" :
436 " ??? code ",vcc->qos.aal == ATM_AAL0 ? 0 : vcc->qos.aal); 435 vcc->qos.aal == ATM_AAL0 ? "" : " ??? code ",
436 vcc->qos.aal == ATM_AAL0 ? 0 : vcc->qos.aal);
437 if (!test_bit(ATM_VF_HASQOS, &vcc->flags)) 437 if (!test_bit(ATM_VF_HASQOS, &vcc->flags))
438 return -EBADFD; 438 return -EBADFD;
439 if (vcc->qos.txtp.traffic_class == ATM_ANYCLASS || 439 if (vcc->qos.txtp.traffic_class == ATM_ANYCLASS ||
440 vcc->qos.rxtp.traffic_class == ATM_ANYCLASS) 440 vcc->qos.rxtp.traffic_class == ATM_ANYCLASS)
441 return -EINVAL; 441 return -EINVAL;
442 if (likely(itf != ATM_ITF_ANY)) { 442 if (likely(itf != ATM_ITF_ANY)) {
443 dev = try_then_request_module(atm_dev_lookup(itf), "atm-device-%d", itf); 443 dev = try_then_request_module(atm_dev_lookup(itf),
444 "atm-device-%d", itf);
444 } else { 445 } else {
445 dev = NULL; 446 dev = NULL;
446 mutex_lock(&atm_dev_mutex); 447 mutex_lock(&atm_dev_mutex);
447 if (!list_empty(&atm_devs)) { 448 if (!list_empty(&atm_devs)) {
448 dev = list_entry(atm_devs.next, struct atm_dev, dev_list); 449 dev = list_entry(atm_devs.next,
450 struct atm_dev, dev_list);
449 atm_dev_hold(dev); 451 atm_dev_hold(dev);
450 } 452 }
451 mutex_unlock(&atm_dev_mutex); 453 mutex_unlock(&atm_dev_mutex);
@@ -458,13 +460,12 @@ int vcc_connect(struct socket *sock, int itf, short vpi, int vci)
458 return error; 460 return error;
459 } 461 }
460 if (vpi == ATM_VPI_UNSPEC || vci == ATM_VCI_UNSPEC) 462 if (vpi == ATM_VPI_UNSPEC || vci == ATM_VCI_UNSPEC)
461 set_bit(ATM_VF_PARTIAL,&vcc->flags); 463 set_bit(ATM_VF_PARTIAL, &vcc->flags);
462 if (test_bit(ATM_VF_READY,&ATM_SD(sock)->flags)) 464 if (test_bit(ATM_VF_READY, &ATM_SD(sock)->flags))
463 sock->state = SS_CONNECTED; 465 sock->state = SS_CONNECTED;
464 return 0; 466 return 0;
465} 467}
466 468
467
468int vcc_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, 469int vcc_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
469 size_t size, int flags) 470 size_t size, int flags)
470{ 471{
@@ -478,8 +479,8 @@ int vcc_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
478 if (flags & ~MSG_DONTWAIT) /* only handle MSG_DONTWAIT */ 479 if (flags & ~MSG_DONTWAIT) /* only handle MSG_DONTWAIT */
479 return -EOPNOTSUPP; 480 return -EOPNOTSUPP;
480 vcc = ATM_SD(sock); 481 vcc = ATM_SD(sock);
481 if (test_bit(ATM_VF_RELEASED,&vcc->flags) || 482 if (test_bit(ATM_VF_RELEASED, &vcc->flags) ||
482 test_bit(ATM_VF_CLOSE,&vcc->flags) || 483 test_bit(ATM_VF_CLOSE, &vcc->flags) ||
483 !test_bit(ATM_VF_READY, &vcc->flags)) 484 !test_bit(ATM_VF_READY, &vcc->flags))
484 return 0; 485 return 0;
485 486
@@ -497,13 +498,12 @@ int vcc_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
497 if (error) 498 if (error)
498 return error; 499 return error;
499 sock_recv_ts_and_drops(msg, sk, skb); 500 sock_recv_ts_and_drops(msg, sk, skb);
500 pr_debug("RcvM %d -= %d\n", atomic_read(&sk->sk_rmem_alloc), skb->truesize); 501 pr_debug("%d -= %d\n", atomic_read(&sk->sk_rmem_alloc), skb->truesize);
501 atm_return(vcc, skb->truesize); 502 atm_return(vcc, skb->truesize);
502 skb_free_datagram(sk, skb); 503 skb_free_datagram(sk, skb);
503 return copied; 504 return copied;
504} 505}
505 506
506
507int vcc_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *m, 507int vcc_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *m,
508 size_t total_len) 508 size_t total_len)
509{ 509{
@@ -511,7 +511,7 @@ int vcc_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *m,
511 DEFINE_WAIT(wait); 511 DEFINE_WAIT(wait);
512 struct atm_vcc *vcc; 512 struct atm_vcc *vcc;
513 struct sk_buff *skb; 513 struct sk_buff *skb;
514 int eff,error; 514 int eff, error;
515 const void __user *buff; 515 const void __user *buff;
516 int size; 516 int size;
517 517
@@ -550,7 +550,7 @@ int vcc_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *m,
550 eff = (size+3) & ~3; /* align to word boundary */ 550 eff = (size+3) & ~3; /* align to word boundary */
551 prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE); 551 prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
552 error = 0; 552 error = 0;
553 while (!(skb = alloc_tx(vcc,eff))) { 553 while (!(skb = alloc_tx(vcc, eff))) {
554 if (m->msg_flags & MSG_DONTWAIT) { 554 if (m->msg_flags & MSG_DONTWAIT) {
555 error = -EAGAIN; 555 error = -EAGAIN;
556 break; 556 break;
@@ -560,9 +560,9 @@ int vcc_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *m,
560 error = -ERESTARTSYS; 560 error = -ERESTARTSYS;
561 break; 561 break;
562 } 562 }
563 if (test_bit(ATM_VF_RELEASED,&vcc->flags) || 563 if (test_bit(ATM_VF_RELEASED, &vcc->flags) ||
564 test_bit(ATM_VF_CLOSE,&vcc->flags) || 564 test_bit(ATM_VF_CLOSE, &vcc->flags) ||
565 !test_bit(ATM_VF_READY,&vcc->flags)) { 565 !test_bit(ATM_VF_READY, &vcc->flags)) {
566 error = -EPIPE; 566 error = -EPIPE;
567 send_sig(SIGPIPE, current, 0); 567 send_sig(SIGPIPE, current, 0);
568 break; 568 break;
@@ -574,20 +574,20 @@ int vcc_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *m,
574 goto out; 574 goto out;
575 skb->dev = NULL; /* for paths shared with net_device interfaces */ 575 skb->dev = NULL; /* for paths shared with net_device interfaces */
576 ATM_SKB(skb)->atm_options = vcc->atm_options; 576 ATM_SKB(skb)->atm_options = vcc->atm_options;
577 if (copy_from_user(skb_put(skb,size),buff,size)) { 577 if (copy_from_user(skb_put(skb, size), buff, size)) {
578 kfree_skb(skb); 578 kfree_skb(skb);
579 error = -EFAULT; 579 error = -EFAULT;
580 goto out; 580 goto out;
581 } 581 }
582 if (eff != size) memset(skb->data+size,0,eff-size); 582 if (eff != size)
583 error = vcc->dev->ops->send(vcc,skb); 583 memset(skb->data + size, 0, eff-size);
584 error = vcc->dev->ops->send(vcc, skb);
584 error = error ? error : size; 585 error = error ? error : size;
585out: 586out:
586 release_sock(sk); 587 release_sock(sk);
587 return error; 588 return error;
588} 589}
589 590
590
591unsigned int vcc_poll(struct file *file, struct socket *sock, poll_table *wait) 591unsigned int vcc_poll(struct file *file, struct socket *sock, poll_table *wait)
592{ 592{
593 struct sock *sk = sock->sk; 593 struct sock *sk = sock->sk;
@@ -623,8 +623,7 @@ unsigned int vcc_poll(struct file *file, struct socket *sock, poll_table *wait)
623 return mask; 623 return mask;
624} 624}
625 625
626 626static int atm_change_qos(struct atm_vcc *vcc, struct atm_qos *qos)
627static int atm_change_qos(struct atm_vcc *vcc,struct atm_qos *qos)
628{ 627{
629 int error; 628 int error;
630 629
@@ -636,25 +635,31 @@ static int atm_change_qos(struct atm_vcc *vcc,struct atm_qos *qos)
636 qos->rxtp.traffic_class != vcc->qos.rxtp.traffic_class || 635 qos->rxtp.traffic_class != vcc->qos.rxtp.traffic_class ||
637 qos->txtp.traffic_class != vcc->qos.txtp.traffic_class) 636 qos->txtp.traffic_class != vcc->qos.txtp.traffic_class)
638 return -EINVAL; 637 return -EINVAL;
639 error = adjust_tp(&qos->txtp,qos->aal); 638 error = adjust_tp(&qos->txtp, qos->aal);
640 if (!error) error = adjust_tp(&qos->rxtp,qos->aal); 639 if (!error)
641 if (error) return error; 640 error = adjust_tp(&qos->rxtp, qos->aal);
642 if (!vcc->dev->ops->change_qos) return -EOPNOTSUPP; 641 if (error)
642 return error;
643 if (!vcc->dev->ops->change_qos)
644 return -EOPNOTSUPP;
643 if (sk_atm(vcc)->sk_family == AF_ATMPVC) 645 if (sk_atm(vcc)->sk_family == AF_ATMPVC)
644 return vcc->dev->ops->change_qos(vcc,qos,ATM_MF_SET); 646 return vcc->dev->ops->change_qos(vcc, qos, ATM_MF_SET);
645 return svc_change_qos(vcc,qos); 647 return svc_change_qos(vcc, qos);
646} 648}
647 649
648
649static int check_tp(const struct atm_trafprm *tp) 650static int check_tp(const struct atm_trafprm *tp)
650{ 651{
651 /* @@@ Should be merged with adjust_tp */ 652 /* @@@ Should be merged with adjust_tp */
652 if (!tp->traffic_class || tp->traffic_class == ATM_ANYCLASS) return 0; 653 if (!tp->traffic_class || tp->traffic_class == ATM_ANYCLASS)
654 return 0;
653 if (tp->traffic_class != ATM_UBR && !tp->min_pcr && !tp->pcr && 655 if (tp->traffic_class != ATM_UBR && !tp->min_pcr && !tp->pcr &&
654 !tp->max_pcr) return -EINVAL; 656 !tp->max_pcr)
655 if (tp->min_pcr == ATM_MAX_PCR) return -EINVAL; 657 return -EINVAL;
658 if (tp->min_pcr == ATM_MAX_PCR)
659 return -EINVAL;
656 if (tp->min_pcr && tp->max_pcr && tp->max_pcr != ATM_MAX_PCR && 660 if (tp->min_pcr && tp->max_pcr && tp->max_pcr != ATM_MAX_PCR &&
657 tp->min_pcr > tp->max_pcr) return -EINVAL; 661 tp->min_pcr > tp->max_pcr)
662 return -EINVAL;
658 /* 663 /*
659 * We allow pcr to be outside [min_pcr,max_pcr], because later 664 * We allow pcr to be outside [min_pcr,max_pcr], because later
660 * adjustment may still push it in the valid range. 665 * adjustment may still push it in the valid range.
@@ -662,7 +667,6 @@ static int check_tp(const struct atm_trafprm *tp)
662 return 0; 667 return 0;
663} 668}
664 669
665
666static int check_qos(const struct atm_qos *qos) 670static int check_qos(const struct atm_qos *qos)
667{ 671{
668 int error; 672 int error;
@@ -672,9 +676,11 @@ static int check_qos(const struct atm_qos *qos)
672 if (qos->txtp.traffic_class != qos->rxtp.traffic_class && 676 if (qos->txtp.traffic_class != qos->rxtp.traffic_class &&
673 qos->txtp.traffic_class && qos->rxtp.traffic_class && 677 qos->txtp.traffic_class && qos->rxtp.traffic_class &&
674 qos->txtp.traffic_class != ATM_ANYCLASS && 678 qos->txtp.traffic_class != ATM_ANYCLASS &&
675 qos->rxtp.traffic_class != ATM_ANYCLASS) return -EINVAL; 679 qos->rxtp.traffic_class != ATM_ANYCLASS)
680 return -EINVAL;
676 error = check_tp(&qos->txtp); 681 error = check_tp(&qos->txtp);
677 if (error) return error; 682 if (error)
683 return error;
678 return check_tp(&qos->rxtp); 684 return check_tp(&qos->rxtp);
679} 685}
680 686
@@ -690,37 +696,41 @@ int vcc_setsockopt(struct socket *sock, int level, int optname,
690 696
691 vcc = ATM_SD(sock); 697 vcc = ATM_SD(sock);
692 switch (optname) { 698 switch (optname) {
693 case SO_ATMQOS: 699 case SO_ATMQOS:
694 { 700 {
695 struct atm_qos qos; 701 struct atm_qos qos;
696 702
697 if (copy_from_user(&qos,optval,sizeof(qos))) 703 if (copy_from_user(&qos, optval, sizeof(qos)))
698 return -EFAULT; 704 return -EFAULT;
699 error = check_qos(&qos); 705 error = check_qos(&qos);
700 if (error) return error; 706 if (error)
701 if (sock->state == SS_CONNECTED) 707 return error;
702 return atm_change_qos(vcc,&qos); 708 if (sock->state == SS_CONNECTED)
703 if (sock->state != SS_UNCONNECTED) 709 return atm_change_qos(vcc, &qos);
704 return -EBADFD; 710 if (sock->state != SS_UNCONNECTED)
705 vcc->qos = qos; 711 return -EBADFD;
706 set_bit(ATM_VF_HASQOS,&vcc->flags); 712 vcc->qos = qos;
707 return 0; 713 set_bit(ATM_VF_HASQOS, &vcc->flags);
708 } 714 return 0;
709 case SO_SETCLP:
710 if (get_user(value,(unsigned long __user *)optval))
711 return -EFAULT;
712 if (value) vcc->atm_options |= ATM_ATMOPT_CLP;
713 else vcc->atm_options &= ~ATM_ATMOPT_CLP;
714 return 0;
715 default:
716 if (level == SOL_SOCKET) return -EINVAL;
717 break;
718 } 715 }
719 if (!vcc->dev || !vcc->dev->ops->setsockopt) return -EINVAL; 716 case SO_SETCLP:
720 return vcc->dev->ops->setsockopt(vcc,level,optname,optval,optlen); 717 if (get_user(value, (unsigned long __user *)optval))
718 return -EFAULT;
719 if (value)
720 vcc->atm_options |= ATM_ATMOPT_CLP;
721 else
722 vcc->atm_options &= ~ATM_ATMOPT_CLP;
723 return 0;
724 default:
725 if (level == SOL_SOCKET)
726 return -EINVAL;
727 break;
728 }
729 if (!vcc->dev || !vcc->dev->ops->setsockopt)
730 return -EINVAL;
731 return vcc->dev->ops->setsockopt(vcc, level, optname, optval, optlen);
721} 732}
722 733
723
724int vcc_getsockopt(struct socket *sock, int level, int optname, 734int vcc_getsockopt(struct socket *sock, int level, int optname,
725 char __user *optval, int __user *optlen) 735 char __user *optval, int __user *optlen)
726{ 736{
@@ -734,33 +744,33 @@ int vcc_getsockopt(struct socket *sock, int level, int optname,
734 744
735 vcc = ATM_SD(sock); 745 vcc = ATM_SD(sock);
736 switch (optname) { 746 switch (optname) {
737 case SO_ATMQOS: 747 case SO_ATMQOS:
738 if (!test_bit(ATM_VF_HASQOS,&vcc->flags)) 748 if (!test_bit(ATM_VF_HASQOS, &vcc->flags))
739 return -EINVAL; 749 return -EINVAL;
740 return copy_to_user(optval,&vcc->qos,sizeof(vcc->qos)) ? 750 return copy_to_user(optval, &vcc->qos, sizeof(vcc->qos))
741 -EFAULT : 0; 751 ? -EFAULT : 0;
742 case SO_SETCLP: 752 case SO_SETCLP:
743 return put_user(vcc->atm_options & ATM_ATMOPT_CLP ? 1 : 753 return put_user(vcc->atm_options & ATM_ATMOPT_CLP ? 1 : 0,
744 0,(unsigned long __user *)optval) ? -EFAULT : 0; 754 (unsigned long __user *)optval) ? -EFAULT : 0;
745 case SO_ATMPVC: 755 case SO_ATMPVC:
746 { 756 {
747 struct sockaddr_atmpvc pvc; 757 struct sockaddr_atmpvc pvc;
748 758
749 if (!vcc->dev || 759 if (!vcc->dev || !test_bit(ATM_VF_ADDR, &vcc->flags))
750 !test_bit(ATM_VF_ADDR,&vcc->flags)) 760 return -ENOTCONN;
751 return -ENOTCONN; 761 pvc.sap_family = AF_ATMPVC;
752 pvc.sap_family = AF_ATMPVC; 762 pvc.sap_addr.itf = vcc->dev->number;
753 pvc.sap_addr.itf = vcc->dev->number; 763 pvc.sap_addr.vpi = vcc->vpi;
754 pvc.sap_addr.vpi = vcc->vpi; 764 pvc.sap_addr.vci = vcc->vci;
755 pvc.sap_addr.vci = vcc->vci; 765 return copy_to_user(optval, &pvc, sizeof(pvc)) ? -EFAULT : 0;
756 return copy_to_user(optval,&pvc,sizeof(pvc)) ? 766 }
757 -EFAULT : 0; 767 default:
758 } 768 if (level == SOL_SOCKET)
759 default: 769 return -EINVAL;
760 if (level == SOL_SOCKET) return -EINVAL;
761 break; 770 break;
762 } 771 }
763 if (!vcc->dev || !vcc->dev->ops->getsockopt) return -EINVAL; 772 if (!vcc->dev || !vcc->dev->ops->getsockopt)
773 return -EINVAL;
764 return vcc->dev->ops->getsockopt(vcc, level, optname, optval, len); 774 return vcc->dev->ops->getsockopt(vcc, level, optname, optval, len);
765} 775}
766 776
@@ -768,23 +778,27 @@ static int __init atm_init(void)
768{ 778{
769 int error; 779 int error;
770 780
771 if ((error = proto_register(&vcc_proto, 0)) < 0) 781 error = proto_register(&vcc_proto, 0);
782 if (error < 0)
772 goto out; 783 goto out;
773 784 error = atmpvc_init();
774 if ((error = atmpvc_init()) < 0) { 785 if (error < 0) {
775 printk(KERN_ERR "atmpvc_init() failed with %d\n", error); 786 pr_err("atmpvc_init() failed with %d\n", error);
776 goto out_unregister_vcc_proto; 787 goto out_unregister_vcc_proto;
777 } 788 }
778 if ((error = atmsvc_init()) < 0) { 789 error = atmsvc_init();
779 printk(KERN_ERR "atmsvc_init() failed with %d\n", error); 790 if (error < 0) {
791 pr_err("atmsvc_init() failed with %d\n", error);
780 goto out_atmpvc_exit; 792 goto out_atmpvc_exit;
781 } 793 }
782 if ((error = atm_proc_init()) < 0) { 794 error = atm_proc_init();
783 printk(KERN_ERR "atm_proc_init() failed with %d\n",error); 795 if (error < 0) {
796 pr_err("atm_proc_init() failed with %d\n", error);
784 goto out_atmsvc_exit; 797 goto out_atmsvc_exit;
785 } 798 }
786 if ((error = atm_sysfs_init()) < 0) { 799 error = atm_sysfs_init();
787 printk(KERN_ERR "atm_sysfs_init() failed with %d\n",error); 800 if (error < 0) {
801 pr_err("atm_sysfs_init() failed with %d\n", error);
788 goto out_atmproc_exit; 802 goto out_atmproc_exit;
789 } 803 }
790out: 804out:
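
The atm_init() hunk above repeatedly applies one small style transformation: the assignment is pulled out of the if condition before the error check. Both spellings behave identically; the standalone sketch below shows the two forms side by side, where init_stub() is a made-up placeholder rather than any kernel function.

#include <stdio.h>

/* hypothetical stand-in for proto_register()/atmpvc_init() and friends */
static int init_stub(void)
{
        return -1;      /* pretend the subsystem failed to initialise */
}

int main(void)
{
        int error;

        /* old style: assignment buried inside the condition */
        if ((error = init_stub()) < 0)
                printf("old style saw %d\n", error);

        /* new style used by the patch: assign first, then test */
        error = init_stub();
        if (error < 0)
                printf("new style saw %d\n", error);

        return 0;
}
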
diff --git a/net/atm/ioctl.c b/net/atm/ioctl.c
index 2ea40995dce..62dc8bfe6fe 100644
--- a/net/atm/ioctl.c
+++ b/net/atm/ioctl.c
@@ -3,6 +3,7 @@
3/* Written 1995-2000 by Werner Almesberger, EPFL LRC/ICA */ 3/* Written 1995-2000 by Werner Almesberger, EPFL LRC/ICA */
4/* 2003 John Levon <levon@movementarian.org> */ 4/* 2003 John Levon <levon@movementarian.org> */
5 5
6#define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__
6 7
7#include <linux/module.h> 8#include <linux/module.h>
8#include <linux/kmod.h> 9#include <linux/kmod.h>
@@ -36,6 +37,7 @@ void register_atm_ioctl(struct atm_ioctl *ioctl)
36 list_add_tail(&ioctl->list, &ioctl_list); 37 list_add_tail(&ioctl->list, &ioctl_list);
37 mutex_unlock(&ioctl_mutex); 38 mutex_unlock(&ioctl_mutex);
38} 39}
40EXPORT_SYMBOL(register_atm_ioctl);
39 41
40void deregister_atm_ioctl(struct atm_ioctl *ioctl) 42void deregister_atm_ioctl(struct atm_ioctl *ioctl)
41{ 43{
@@ -43,129 +45,128 @@ void deregister_atm_ioctl(struct atm_ioctl *ioctl)
43 list_del(&ioctl->list); 45 list_del(&ioctl->list);
44 mutex_unlock(&ioctl_mutex); 46 mutex_unlock(&ioctl_mutex);
45} 47}
46
47EXPORT_SYMBOL(register_atm_ioctl);
48EXPORT_SYMBOL(deregister_atm_ioctl); 48EXPORT_SYMBOL(deregister_atm_ioctl);
49 49
50static int do_vcc_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg, int compat) 50static int do_vcc_ioctl(struct socket *sock, unsigned int cmd,
51 unsigned long arg, int compat)
51{ 52{
52 struct sock *sk = sock->sk; 53 struct sock *sk = sock->sk;
53 struct atm_vcc *vcc; 54 struct atm_vcc *vcc;
54 int error; 55 int error;
55 struct list_head * pos; 56 struct list_head *pos;
56 void __user *argp = (void __user *)arg; 57 void __user *argp = (void __user *)arg;
57 58
58 vcc = ATM_SD(sock); 59 vcc = ATM_SD(sock);
59 switch (cmd) { 60 switch (cmd) {
60 case SIOCOUTQ: 61 case SIOCOUTQ:
61 if (sock->state != SS_CONNECTED || 62 if (sock->state != SS_CONNECTED ||
62 !test_bit(ATM_VF_READY, &vcc->flags)) { 63 !test_bit(ATM_VF_READY, &vcc->flags)) {
63 error = -EINVAL; 64 error = -EINVAL;
64 goto done; 65 goto done;
65 } 66 }
66 error = put_user(sk->sk_sndbuf - sk_wmem_alloc_get(sk), 67 error = put_user(sk->sk_sndbuf - sk_wmem_alloc_get(sk),
67 (int __user *) argp) ? -EFAULT : 0; 68 (int __user *)argp) ? -EFAULT : 0;
69 goto done;
70 case SIOCINQ:
71 {
72 struct sk_buff *skb;
73
74 if (sock->state != SS_CONNECTED) {
75 error = -EINVAL;
68 goto done; 76 goto done;
69 case SIOCINQ: 77 }
70 { 78 skb = skb_peek(&sk->sk_receive_queue);
71 struct sk_buff *skb; 79 error = put_user(skb ? skb->len : 0,
72 80 (int __user *)argp) ? -EFAULT : 0;
73 if (sock->state != SS_CONNECTED) { 81 goto done;
74 error = -EINVAL; 82 }
75 goto done; 83 case SIOCGSTAMP: /* borrowed from IP */
76 }
77 skb = skb_peek(&sk->sk_receive_queue);
78 error = put_user(skb ? skb->len : 0,
79 (int __user *)argp) ? -EFAULT : 0;
80 goto done;
81 }
82 case SIOCGSTAMP: /* borrowed from IP */
83#ifdef CONFIG_COMPAT 84#ifdef CONFIG_COMPAT
84 if (compat) 85 if (compat)
85 error = compat_sock_get_timestamp(sk, argp); 86 error = compat_sock_get_timestamp(sk, argp);
86 else 87 else
87#endif 88#endif
88 error = sock_get_timestamp(sk, argp); 89 error = sock_get_timestamp(sk, argp);
89 goto done; 90 goto done;
90 case SIOCGSTAMPNS: /* borrowed from IP */ 91 case SIOCGSTAMPNS: /* borrowed from IP */
91#ifdef CONFIG_COMPAT 92#ifdef CONFIG_COMPAT
92 if (compat) 93 if (compat)
93 error = compat_sock_get_timestampns(sk, argp); 94 error = compat_sock_get_timestampns(sk, argp);
94 else 95 else
95#endif 96#endif
96 error = sock_get_timestampns(sk, argp); 97 error = sock_get_timestampns(sk, argp);
98 goto done;
99 case ATM_SETSC:
100 if (net_ratelimit())
101 pr_warning("ATM_SETSC is obsolete; used by %s:%d\n",
102 current->comm, task_pid_nr(current));
103 error = 0;
104 goto done;
105 case ATMSIGD_CTRL:
106 if (!capable(CAP_NET_ADMIN)) {
107 error = -EPERM;
97 goto done; 108 goto done;
98 case ATM_SETSC: 109 }
99 if (net_ratelimit()) 110 /*
100 printk(KERN_WARNING "ATM_SETSC is obsolete; used by %s:%d\n", 111 * The user/kernel protocol for exchanging signalling
101 current->comm, task_pid_nr(current)); 112 * info uses kernel pointers as opaque references,
102 error = 0; 113 * so the holder of the file descriptor can scribble
114 * on the kernel... so we should make sure that we
115 * have the same privileges that /proc/kcore needs
116 */
117 if (!capable(CAP_SYS_RAWIO)) {
118 error = -EPERM;
103 goto done; 119 goto done;
104 case ATMSIGD_CTRL: 120 }
105 if (!capable(CAP_NET_ADMIN)) {
106 error = -EPERM;
107 goto done;
108 }
109 /*
110 * The user/kernel protocol for exchanging signalling
111 * info uses kernel pointers as opaque references,
112 * so the holder of the file descriptor can scribble
113 * on the kernel... so we should make sure that we
114 * have the same privileges that /proc/kcore needs
115 */
116 if (!capable(CAP_SYS_RAWIO)) {
117 error = -EPERM;
118 goto done;
119 }
120#ifdef CONFIG_COMPAT 121#ifdef CONFIG_COMPAT
121 /* WTF? I don't even want to _think_ about making this 122 /* WTF? I don't even want to _think_ about making this
122 work for 32-bit userspace. TBH I don't really want 123 work for 32-bit userspace. TBH I don't really want
123 to think about it at all. dwmw2. */ 124 to think about it at all. dwmw2. */
124 if (compat) { 125 if (compat) {
125 if (net_ratelimit()) 126 if (net_ratelimit())
126 printk(KERN_WARNING "32-bit task cannot be atmsigd\n"); 127 pr_warning("32-bit task cannot be atmsigd\n");
127 error = -EINVAL; 128 error = -EINVAL;
128 goto done; 129 goto done;
129 } 130 }
130#endif 131#endif
131 error = sigd_attach(vcc); 132 error = sigd_attach(vcc);
132 if (!error) 133 if (!error)
133 sock->state = SS_CONNECTED; 134 sock->state = SS_CONNECTED;
135 goto done;
136 case ATM_SETBACKEND:
137 case ATM_NEWBACKENDIF:
138 {
139 atm_backend_t backend;
140 error = get_user(backend, (atm_backend_t __user *)argp);
141 if (error)
134 goto done; 142 goto done;
135 case ATM_SETBACKEND: 143 switch (backend) {
136 case ATM_NEWBACKENDIF: 144 case ATM_BACKEND_PPP:
137 { 145 request_module("pppoatm");
138 atm_backend_t backend;
139 error = get_user(backend, (atm_backend_t __user *) argp);
140 if (error)
141 goto done;
142 switch (backend) {
143 case ATM_BACKEND_PPP:
144 request_module("pppoatm");
145 break;
146 case ATM_BACKEND_BR2684:
147 request_module("br2684");
148 break;
149 }
150 }
151 break;
152 case ATMMPC_CTRL:
153 case ATMMPC_DATA:
154 request_module("mpoa");
155 break;
156 case ATMARPD_CTRL:
157 request_module("clip");
158 break; 146 break;
159 case ATMLEC_CTRL: 147 case ATM_BACKEND_BR2684:
160 request_module("lec"); 148 request_module("br2684");
161 break; 149 break;
150 }
151 break;
152 }
153 case ATMMPC_CTRL:
154 case ATMMPC_DATA:
155 request_module("mpoa");
156 break;
157 case ATMARPD_CTRL:
158 request_module("clip");
159 break;
160 case ATMLEC_CTRL:
161 request_module("lec");
162 break;
162 } 163 }
163 164
164 error = -ENOIOCTLCMD; 165 error = -ENOIOCTLCMD;
165 166
166 mutex_lock(&ioctl_mutex); 167 mutex_lock(&ioctl_mutex);
167 list_for_each(pos, &ioctl_list) { 168 list_for_each(pos, &ioctl_list) {
168 struct atm_ioctl * ic = list_entry(pos, struct atm_ioctl, list); 169 struct atm_ioctl *ic = list_entry(pos, struct atm_ioctl, list);
169 if (try_module_get(ic->owner)) { 170 if (try_module_get(ic->owner)) {
170 error = ic->ioctl(sock, cmd, arg); 171 error = ic->ioctl(sock, cmd, arg);
171 module_put(ic->owner); 172 module_put(ic->owner);
@@ -184,7 +185,6 @@ done:
184 return error; 185 return error;
185} 186}
186 187
187
188int vcc_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) 188int vcc_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
189{ 189{
190 return do_vcc_ioctl(sock, cmd, arg, 0); 190 return do_vcc_ioctl(sock, cmd, arg, 0);
@@ -287,8 +287,8 @@ static int do_atmif_sioc(struct socket *sock, unsigned int cmd,
287 sioc = compat_alloc_user_space(sizeof(*sioc)); 287 sioc = compat_alloc_user_space(sizeof(*sioc));
288 sioc32 = compat_ptr(arg); 288 sioc32 = compat_ptr(arg);
289 289
290 if (copy_in_user(&sioc->number, &sioc32->number, 2 * sizeof(int)) 290 if (copy_in_user(&sioc->number, &sioc32->number, 2 * sizeof(int)) ||
291 || get_user(data, &sioc32->arg)) 291 get_user(data, &sioc32->arg))
292 return -EFAULT; 292 return -EFAULT;
293 datap = compat_ptr(data); 293 datap = compat_ptr(data);
294 if (put_user(datap, &sioc->arg)) 294 if (put_user(datap, &sioc->arg))
diff --git a/net/atm/lec.c b/net/atm/lec.c
index 42749b7b917..5da5753157f 100644
--- a/net/atm/lec.c
+++ b/net/atm/lec.c
@@ -4,6 +4,8 @@
4 * Marko Kiiskila <mkiiskila@yahoo.com> 4 * Marko Kiiskila <mkiiskila@yahoo.com>
5 */ 5 */
6 6
7#define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__
8
7#include <linux/kernel.h> 9#include <linux/kernel.h>
8#include <linux/bitops.h> 10#include <linux/bitops.h>
9#include <linux/capability.h> 11#include <linux/capability.h>
@@ -16,7 +18,7 @@
16#include <linux/skbuff.h> 18#include <linux/skbuff.h>
17#include <linux/ip.h> 19#include <linux/ip.h>
18#include <asm/byteorder.h> 20#include <asm/byteorder.h>
19#include <asm/uaccess.h> 21#include <linux/uaccess.h>
20#include <net/arp.h> 22#include <net/arp.h>
21#include <net/dst.h> 23#include <net/dst.h>
22#include <linux/proc_fs.h> 24#include <linux/proc_fs.h>
@@ -85,17 +87,19 @@ static struct atm_vcc *lec_arp_resolve(struct lec_priv *priv,
85 int is_rdesc, 87 int is_rdesc,
86 struct lec_arp_table **ret_entry); 88 struct lec_arp_table **ret_entry);
87static void lec_arp_update(struct lec_priv *priv, const unsigned char *mac_addr, 89static void lec_arp_update(struct lec_priv *priv, const unsigned char *mac_addr,
88 const unsigned char *atm_addr, unsigned long remoteflag, 90 const unsigned char *atm_addr,
91 unsigned long remoteflag,
89 unsigned int targetless_le_arp); 92 unsigned int targetless_le_arp);
90static void lec_flush_complete(struct lec_priv *priv, unsigned long tran_id); 93static void lec_flush_complete(struct lec_priv *priv, unsigned long tran_id);
91static int lec_mcast_make(struct lec_priv *priv, struct atm_vcc *vcc); 94static int lec_mcast_make(struct lec_priv *priv, struct atm_vcc *vcc);
92static void lec_set_flush_tran_id(struct lec_priv *priv, 95static void lec_set_flush_tran_id(struct lec_priv *priv,
93 const unsigned char *atm_addr, 96 const unsigned char *atm_addr,
94 unsigned long tran_id); 97 unsigned long tran_id);
95static void lec_vcc_added(struct lec_priv *priv, const struct atmlec_ioc *ioc_data, 98static void lec_vcc_added(struct lec_priv *priv,
99 const struct atmlec_ioc *ioc_data,
96 struct atm_vcc *vcc, 100 struct atm_vcc *vcc,
97 void (*old_push) (struct atm_vcc *vcc, 101 void (*old_push)(struct atm_vcc *vcc,
98 struct sk_buff *skb)); 102 struct sk_buff *skb));
99static void lec_vcc_close(struct lec_priv *priv, struct atm_vcc *vcc); 103static void lec_vcc_close(struct lec_priv *priv, struct atm_vcc *vcc);
100 104
101/* must be done under lec_arp_lock */ 105/* must be done under lec_arp_lock */
@@ -110,7 +114,6 @@ static inline void lec_arp_put(struct lec_arp_table *entry)
110 kfree(entry); 114 kfree(entry);
111} 115}
112 116
113
114static struct lane2_ops lane2_ops = { 117static struct lane2_ops lane2_ops = {
115 lane2_resolve, /* resolve, spec 3.1.3 */ 118 lane2_resolve, /* resolve, spec 3.1.3 */
116 lane2_associate_req, /* associate_req, spec 3.1.4 */ 119 lane2_associate_req, /* associate_req, spec 3.1.4 */
@@ -148,7 +151,8 @@ static void lec_handle_bridge(struct sk_buff *skb, struct net_device *dev)
148 mesg = (struct atmlec_msg *)skb2->data; 151 mesg = (struct atmlec_msg *)skb2->data;
149 mesg->type = l_topology_change; 152 mesg->type = l_topology_change;
150 buff += 4; 153 buff += 4;
151 mesg->content.normal.flag = *buff & 0x01; /* 0x01 is topology change */ 154 mesg->content.normal.flag = *buff & 0x01;
155 /* 0x01 is topology change */
152 156
153 priv = netdev_priv(dev); 157 priv = netdev_priv(dev);
154 atm_force_charge(priv->lecd, skb2->truesize); 158 atm_force_charge(priv->lecd, skb2->truesize);
@@ -242,7 +246,7 @@ lec_send(struct atm_vcc *vcc, struct sk_buff *skb)
242 246
243static void lec_tx_timeout(struct net_device *dev) 247static void lec_tx_timeout(struct net_device *dev)
244{ 248{
245 printk(KERN_INFO "%s: tx timeout\n", dev->name); 249 pr_info("%s\n", dev->name);
246 dev->trans_start = jiffies; 250 dev->trans_start = jiffies;
247 netif_wake_queue(dev); 251 netif_wake_queue(dev);
248} 252}
@@ -261,14 +265,10 @@ static netdev_tx_t lec_start_xmit(struct sk_buff *skb,
261 unsigned char rdesc[ETH_ALEN]; /* Token Ring route descriptor */ 265 unsigned char rdesc[ETH_ALEN]; /* Token Ring route descriptor */
262#endif 266#endif
263 int is_rdesc; 267 int is_rdesc;
264#if DUMP_PACKETS > 0
265 char buf[300];
266 int i = 0;
267#endif /* DUMP_PACKETS >0 */
268 268
269 pr_debug("lec_start_xmit called\n"); 269 pr_debug("called\n");
270 if (!priv->lecd) { 270 if (!priv->lecd) {
271 printk("%s:No lecd attached\n", dev->name); 271 pr_info("%s:No lecd attached\n", dev->name);
272 dev->stats.tx_errors++; 272 dev->stats.tx_errors++;
273 netif_stop_queue(dev); 273 netif_stop_queue(dev);
274 kfree_skb(skb); 274 kfree_skb(skb);
@@ -276,8 +276,8 @@ static netdev_tx_t lec_start_xmit(struct sk_buff *skb,
276 } 276 }
277 277
278 pr_debug("skbuff head:%lx data:%lx tail:%lx end:%lx\n", 278 pr_debug("skbuff head:%lx data:%lx tail:%lx end:%lx\n",
279 (long)skb->head, (long)skb->data, (long)skb_tail_pointer(skb), 279 (long)skb->head, (long)skb->data, (long)skb_tail_pointer(skb),
280 (long)skb_end_pointer(skb)); 280 (long)skb_end_pointer(skb));
281#if defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE) 281#if defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE)
282 if (memcmp(skb->data, bridge_ula_lec, sizeof(bridge_ula_lec)) == 0) 282 if (memcmp(skb->data, bridge_ula_lec, sizeof(bridge_ula_lec)) == 0)
283 lec_handle_bridge(skb, dev); 283 lec_handle_bridge(skb, dev);
@@ -285,8 +285,7 @@ static netdev_tx_t lec_start_xmit(struct sk_buff *skb,
285 285
286 /* Make sure we have room for lec_id */ 286 /* Make sure we have room for lec_id */
287 if (skb_headroom(skb) < 2) { 287 if (skb_headroom(skb) < 2) {
288 288 pr_debug("reallocating skb\n");
289 pr_debug("lec_start_xmit: reallocating skb\n");
290 skb2 = skb_realloc_headroom(skb, LEC_HEADER_LEN); 289 skb2 = skb_realloc_headroom(skb, LEC_HEADER_LEN);
291 kfree_skb(skb); 290 kfree_skb(skb);
292 if (skb2 == NULL) 291 if (skb2 == NULL)
@@ -313,23 +312,17 @@ static netdev_tx_t lec_start_xmit(struct sk_buff *skb,
313 } 312 }
314#endif 313#endif
315 314
316#if DUMP_PACKETS > 0
317 printk("%s: send datalen:%ld lecid:%4.4x\n", dev->name,
318 skb->len, priv->lecid);
319#if DUMP_PACKETS >= 2 315#if DUMP_PACKETS >= 2
320 for (i = 0; i < skb->len && i < 99; i++) { 316#define MAX_DUMP_SKB 99
321 sprintf(buf + i * 3, "%2.2x ", 0xff & skb->data[i]);
322 }
323#elif DUMP_PACKETS >= 1 317#elif DUMP_PACKETS >= 1
324 for (i = 0; i < skb->len && i < 30; i++) { 318#define MAX_DUMP_SKB 30
325 sprintf(buf + i * 3, "%2.2x ", 0xff & skb->data[i]); 319#endif
326 } 320#if DUMP_PACKETS >= 1
321 printk(KERN_DEBUG "%s: send datalen:%ld lecid:%4.4x\n",
322 dev->name, skb->len, priv->lecid);
323 print_hex_dump(KERN_DEBUG, "", DUMP_OFFSET, 16, 1,
324 skb->data, min(skb->len, MAX_DUMP_SKB), true);
327#endif /* DUMP_PACKETS >= 1 */ 325#endif /* DUMP_PACKETS >= 1 */
328 if (i == skb->len)
329 printk("%s\n", buf);
330 else
331 printk("%s...\n", buf);
332#endif /* DUMP_PACKETS > 0 */
333 326
334 /* Minimum ethernet-frame size */ 327 /* Minimum ethernet-frame size */
335#ifdef CONFIG_TR 328#ifdef CONFIG_TR
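The hunk above replaces the hand-rolled sprintf() hex loop with print_hex_dump(), which formats and prints a buffer in one call. A minimal sketch of the call as used here, assuming DUMP_OFFSET is an alias for the standard DUMP_PREFIX_OFFSET prefix type (the alias itself is defined elsewhere in lec.c) and dump_skb_head() is a hypothetical wrapper, not a function from the patch:

	#include <linux/kernel.h>	/* print_hex_dump(), min() */
	#include <linux/skbuff.h>

	static void dump_skb_head(const struct sk_buff *skb, unsigned int max_len)
	{
		/* 16 bytes per row, 1-byte groups, trailing ASCII column enabled */
		print_hex_dump(KERN_DEBUG, "lec: ", DUMP_PREFIX_OFFSET, 16, 1,
			       skb->data, min(skb->len, max_len), true);
	}

The MAX_DUMP_SKB / MAX_SKB_DUMP macros introduced in the patch just feed the same length cap (99 or 30 bytes) into that one call instead of bounding a loop.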
@@ -367,31 +360,28 @@ static netdev_tx_t lec_start_xmit(struct sk_buff *skb,
367#endif 360#endif
368 entry = NULL; 361 entry = NULL;
369 vcc = lec_arp_resolve(priv, dst, is_rdesc, &entry); 362 vcc = lec_arp_resolve(priv, dst, is_rdesc, &entry);
370 pr_debug("%s:vcc:%p vcc_flags:%lx, entry:%p\n", dev->name, 363 pr_debug("%s:vcc:%p vcc_flags:%lx, entry:%p\n",
371 vcc, vcc ? vcc->flags : 0, entry); 364 dev->name, vcc, vcc ? vcc->flags : 0, entry);
372 if (!vcc || !test_bit(ATM_VF_READY, &vcc->flags)) { 365 if (!vcc || !test_bit(ATM_VF_READY, &vcc->flags)) {
373 if (entry && (entry->tx_wait.qlen < LEC_UNRES_QUE_LEN)) { 366 if (entry && (entry->tx_wait.qlen < LEC_UNRES_QUE_LEN)) {
374 pr_debug("%s:lec_start_xmit: queuing packet, ", 367 pr_debug("%s:queuing packet, MAC address %pM\n",
375 dev->name); 368 dev->name, lec_h->h_dest);
376 pr_debug("MAC address %pM\n", lec_h->h_dest);
377 skb_queue_tail(&entry->tx_wait, skb); 369 skb_queue_tail(&entry->tx_wait, skb);
378 } else { 370 } else {
379 pr_debug 371 pr_debug("%s:tx queue full or no arp entry, dropping, MAC address: %pM\n",
380 ("%s:lec_start_xmit: tx queue full or no arp entry, dropping, ", 372 dev->name, lec_h->h_dest);
381 dev->name);
382 pr_debug("MAC address %pM\n", lec_h->h_dest);
383 dev->stats.tx_dropped++; 373 dev->stats.tx_dropped++;
384 dev_kfree_skb(skb); 374 dev_kfree_skb(skb);
385 } 375 }
386 goto out; 376 goto out;
387 } 377 }
388#if DUMP_PACKETS > 0 378#if DUMP_PACKETS > 0
389 printk("%s:sending to vpi:%d vci:%d\n", dev->name, vcc->vpi, vcc->vci); 379 printk(KERN_DEBUG "%s:sending to vpi:%d vci:%d\n",
380 dev->name, vcc->vpi, vcc->vci);
390#endif /* DUMP_PACKETS > 0 */ 381#endif /* DUMP_PACKETS > 0 */
391 382
392 while (entry && (skb2 = skb_dequeue(&entry->tx_wait))) { 383 while (entry && (skb2 = skb_dequeue(&entry->tx_wait))) {
393 pr_debug("lec.c: emptying tx queue, "); 384 pr_debug("emptying tx queue, MAC address %pM\n", lec_h->h_dest);
394 pr_debug("MAC address %pM\n", lec_h->h_dest);
395 lec_send(vcc, skb2); 385 lec_send(vcc, skb2);
396 } 386 }
397 387
@@ -444,14 +434,12 @@ static int lec_atm_send(struct atm_vcc *vcc, struct sk_buff *skb)
444 pr_debug("%s: msg from zeppelin:%d\n", dev->name, mesg->type); 434 pr_debug("%s: msg from zeppelin:%d\n", dev->name, mesg->type);
445 switch (mesg->type) { 435 switch (mesg->type) {
446 case l_set_mac_addr: 436 case l_set_mac_addr:
447 for (i = 0; i < 6; i++) { 437 for (i = 0; i < 6; i++)
448 dev->dev_addr[i] = mesg->content.normal.mac_addr[i]; 438 dev->dev_addr[i] = mesg->content.normal.mac_addr[i];
449 }
450 break; 439 break;
451 case l_del_mac_addr: 440 case l_del_mac_addr:
452 for (i = 0; i < 6; i++) { 441 for (i = 0; i < 6; i++)
453 dev->dev_addr[i] = 0; 442 dev->dev_addr[i] = 0;
454 }
455 break; 443 break;
456 case l_addr_delete: 444 case l_addr_delete:
457 lec_addr_delete(priv, mesg->content.normal.atm_addr, 445 lec_addr_delete(priv, mesg->content.normal.atm_addr,
@@ -477,10 +465,10 @@ static int lec_atm_send(struct atm_vcc *vcc, struct sk_buff *skb)
477 mesg->content.normal.atm_addr, 465 mesg->content.normal.atm_addr,
478 mesg->content.normal.flag, 466 mesg->content.normal.flag,
479 mesg->content.normal.targetless_le_arp); 467 mesg->content.normal.targetless_le_arp);
480 pr_debug("lec: in l_arp_update\n"); 468 pr_debug("in l_arp_update\n");
481 if (mesg->sizeoftlvs != 0) { /* LANE2 3.1.5 */ 469 if (mesg->sizeoftlvs != 0) { /* LANE2 3.1.5 */
482 pr_debug("lec: LANE2 3.1.5, got tlvs, size %d\n", 470 pr_debug("LANE2 3.1.5, got tlvs, size %d\n",
483 mesg->sizeoftlvs); 471 mesg->sizeoftlvs);
484 lane2_associate_ind(dev, mesg->content.normal.mac_addr, 472 lane2_associate_ind(dev, mesg->content.normal.mac_addr,
485 tmp, mesg->sizeoftlvs); 473 tmp, mesg->sizeoftlvs);
486 } 474 }
@@ -499,13 +487,14 @@ static int lec_atm_send(struct atm_vcc *vcc, struct sk_buff *skb)
499 priv->flush_timeout = (mesg->content.config.flush_timeout * HZ); 487 priv->flush_timeout = (mesg->content.config.flush_timeout * HZ);
500 priv->path_switching_delay = 488 priv->path_switching_delay =
501 (mesg->content.config.path_switching_delay * HZ); 489 (mesg->content.config.path_switching_delay * HZ);
502 priv->lane_version = mesg->content.config.lane_version; /* LANE2 */ 490 priv->lane_version = mesg->content.config.lane_version;
491 /* LANE2 */
503 priv->lane2_ops = NULL; 492 priv->lane2_ops = NULL;
504 if (priv->lane_version > 1) 493 if (priv->lane_version > 1)
505 priv->lane2_ops = &lane2_ops; 494 priv->lane2_ops = &lane2_ops;
506 if (dev_set_mtu(dev, mesg->content.config.mtu)) 495 if (dev_set_mtu(dev, mesg->content.config.mtu))
507 printk("%s: change_mtu to %d failed\n", dev->name, 496 pr_info("%s: change_mtu to %d failed\n",
508 mesg->content.config.mtu); 497 dev->name, mesg->content.config.mtu);
509 priv->is_proxy = mesg->content.config.is_proxy; 498 priv->is_proxy = mesg->content.config.is_proxy;
510 break; 499 break;
511 case l_flush_tran_id: 500 case l_flush_tran_id:
@@ -518,40 +507,35 @@ static int lec_atm_send(struct atm_vcc *vcc, struct sk_buff *skb)
518 break; 507 break;
519 case l_should_bridge: 508 case l_should_bridge:
520#if defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE) 509#if defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE)
521 { 510 {
522 pr_debug("%s: bridge zeppelin asks about %pM\n", 511 pr_debug("%s: bridge zeppelin asks about %pM\n",
523 dev->name, mesg->content.proxy.mac_addr); 512 dev->name, mesg->content.proxy.mac_addr);
524 513
525 if (br_fdb_test_addr_hook == NULL) 514 if (br_fdb_test_addr_hook == NULL)
526 break; 515 break;
527 516
528 if (br_fdb_test_addr_hook(dev, 517 if (br_fdb_test_addr_hook(dev, mesg->content.proxy.mac_addr)) {
529 mesg->content.proxy.mac_addr)) { 518 /* hit from bridge table, send LE_ARP_RESPONSE */
530 /* hit from bridge table, send LE_ARP_RESPONSE */ 519 struct sk_buff *skb2;
531 struct sk_buff *skb2; 520 struct sock *sk;
532 struct sock *sk; 521
533 522 pr_debug("%s: entry found, responding to zeppelin\n",
534 pr_debug 523 dev->name);
535 ("%s: entry found, responding to zeppelin\n", 524 skb2 = alloc_skb(sizeof(struct atmlec_msg), GFP_ATOMIC);
536 dev->name); 525 if (skb2 == NULL)
537 skb2 = 526 break;
538 alloc_skb(sizeof(struct atmlec_msg), 527 skb2->len = sizeof(struct atmlec_msg);
539 GFP_ATOMIC); 528 skb_copy_to_linear_data(skb2, mesg, sizeof(*mesg));
540 if (skb2 == NULL) 529 atm_force_charge(priv->lecd, skb2->truesize);
541 break; 530 sk = sk_atm(priv->lecd);
542 skb2->len = sizeof(struct atmlec_msg); 531 skb_queue_tail(&sk->sk_receive_queue, skb2);
543 skb_copy_to_linear_data(skb2, mesg, 532 sk->sk_data_ready(sk, skb2->len);
544 sizeof(*mesg));
545 atm_force_charge(priv->lecd, skb2->truesize);
546 sk = sk_atm(priv->lecd);
547 skb_queue_tail(&sk->sk_receive_queue, skb2);
548 sk->sk_data_ready(sk, skb2->len);
549 }
550 } 533 }
534 }
551#endif /* defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE) */ 535#endif /* defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE) */
552 break; 536 break;
553 default: 537 default:
554 printk("%s: Unknown message type %d\n", dev->name, mesg->type); 538 pr_info("%s: Unknown message type %d\n", dev->name, mesg->type);
555 dev_kfree_skb(skb); 539 dev_kfree_skb(skb);
556 return -EINVAL; 540 return -EINVAL;
557 } 541 }
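The restructured l_should_bridge branch above also shows the pattern lec.c uses to hand a message back to the LANE daemon: build an atmlec_msg in a fresh skb, charge it against the control VCC's socket, queue it on the receive queue, and wake the reader. A stripped-down sketch of that pattern as it would appear inside lec.c (lec_msg_to_daemon is a hypothetical helper name; priv->lecd is the daemon's control VCC and sk_data_ready() still takes the length argument, as in this kernel generation):

	static int lec_msg_to_daemon(struct lec_priv *priv, const struct atmlec_msg *mesg)
	{
		struct sk_buff *skb;
		struct sock *sk;

		skb = alloc_skb(sizeof(*mesg), GFP_ATOMIC);
		if (!skb)
			return -ENOMEM;
		skb->len = sizeof(*mesg);		/* mirrors the code in the hunk */
		skb_copy_to_linear_data(skb, mesg, sizeof(*mesg));

		atm_force_charge(priv->lecd, skb->truesize);	/* account against the VCC */
		sk = sk_atm(priv->lecd);
		skb_queue_tail(&sk->sk_receive_queue, skb);
		sk->sk_data_ready(sk, skb->len);		/* wake the daemon */
		return 0;
	}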
@@ -572,14 +556,13 @@ static void lec_atm_close(struct atm_vcc *vcc)
572 lec_arp_destroy(priv); 556 lec_arp_destroy(priv);
573 557
574 if (skb_peek(&sk_atm(vcc)->sk_receive_queue)) 558 if (skb_peek(&sk_atm(vcc)->sk_receive_queue))
575 printk("%s lec_atm_close: closing with messages pending\n", 559 pr_info("%s closing with messages pending\n", dev->name);
576 dev->name); 560 while ((skb = skb_dequeue(&sk_atm(vcc)->sk_receive_queue))) {
577 while ((skb = skb_dequeue(&sk_atm(vcc)->sk_receive_queue)) != NULL) {
578 atm_return(vcc, skb->truesize); 561 atm_return(vcc, skb->truesize);
579 dev_kfree_skb(skb); 562 dev_kfree_skb(skb);
580 } 563 }
581 564
582 printk("%s: Shut down!\n", dev->name); 565 pr_info("%s: Shut down!\n", dev->name);
583 module_put(THIS_MODULE); 566 module_put(THIS_MODULE);
584} 567}
585 568
@@ -608,9 +591,8 @@ send_to_lecd(struct lec_priv *priv, atmlec_msg_type type,
608 struct sk_buff *skb; 591 struct sk_buff *skb;
609 struct atmlec_msg *mesg; 592 struct atmlec_msg *mesg;
610 593
611 if (!priv || !priv->lecd) { 594 if (!priv || !priv->lecd)
612 return -1; 595 return -1;
613 }
614 skb = alloc_skb(sizeof(struct atmlec_msg), GFP_ATOMIC); 596 skb = alloc_skb(sizeof(struct atmlec_msg), GFP_ATOMIC);
615 if (!skb) 597 if (!skb)
616 return -1; 598 return -1;
@@ -633,7 +615,7 @@ send_to_lecd(struct lec_priv *priv, atmlec_msg_type type,
633 sk->sk_data_ready(sk, skb->len); 615 sk->sk_data_ready(sk, skb->len);
634 616
635 if (data != NULL) { 617 if (data != NULL) {
636 pr_debug("lec: about to send %d bytes of data\n", data->len); 618 pr_debug("about to send %d bytes of data\n", data->len);
637 atm_force_charge(priv->lecd, data->truesize); 619 atm_force_charge(priv->lecd, data->truesize);
638 skb_queue_tail(&sk->sk_receive_queue, data); 620 skb_queue_tail(&sk->sk_receive_queue, data);
639 sk->sk_data_ready(sk, skb->len); 621 sk->sk_data_ready(sk, skb->len);
@@ -691,36 +673,28 @@ static void lec_push(struct atm_vcc *vcc, struct sk_buff *skb)
691 struct net_device *dev = (struct net_device *)vcc->proto_data; 673 struct net_device *dev = (struct net_device *)vcc->proto_data;
692 struct lec_priv *priv = netdev_priv(dev); 674 struct lec_priv *priv = netdev_priv(dev);
693 675
694#if DUMP_PACKETS >0 676#if DUMP_PACKETS > 0
695 int i = 0; 677 printk(KERN_DEBUG "%s: vcc vpi:%d vci:%d\n",
696 char buf[300]; 678 dev->name, vcc->vpi, vcc->vci);
697
698 printk("%s: lec_push vcc vpi:%d vci:%d\n", dev->name,
699 vcc->vpi, vcc->vci);
700#endif 679#endif
701 if (!skb) { 680 if (!skb) {
702 pr_debug("%s: null skb\n", dev->name); 681 pr_debug("%s: null skb\n", dev->name);
703 lec_vcc_close(priv, vcc); 682 lec_vcc_close(priv, vcc);
704 return; 683 return;
705 } 684 }
706#if DUMP_PACKETS > 0
707 printk("%s: rcv datalen:%ld lecid:%4.4x\n", dev->name,
708 skb->len, priv->lecid);
709#if DUMP_PACKETS >= 2 685#if DUMP_PACKETS >= 2
710 for (i = 0; i < skb->len && i < 99; i++) { 686#define MAX_SKB_DUMP 99
711 sprintf(buf + i * 3, "%2.2x ", 0xff & skb->data[i]);
712 }
713#elif DUMP_PACKETS >= 1 687#elif DUMP_PACKETS >= 1
714 for (i = 0; i < skb->len && i < 30; i++) { 688#define MAX_SKB_DUMP 30
715 sprintf(buf + i * 3, "%2.2x ", 0xff & skb->data[i]); 689#endif
716 } 690#if DUMP_PACKETS > 0
717#endif /* DUMP_PACKETS >= 1 */ 691 printk(KERN_DEBUG "%s: rcv datalen:%ld lecid:%4.4x\n",
718 if (i == skb->len) 692 dev->name, skb->len, priv->lecid);
719 printk("%s\n", buf); 693 print_hex_dump(KERN_DEBUG, "", DUMP_OFFSET, 16, 1,
720 else 694 skb->data, min(MAX_SKB_DUMP, skb->len), true);
721 printk("%s...\n", buf);
722#endif /* DUMP_PACKETS > 0 */ 695#endif /* DUMP_PACKETS > 0 */
723 if (memcmp(skb->data, lec_ctrl_magic, 4) == 0) { /* Control frame, to daemon */ 696 if (memcmp(skb->data, lec_ctrl_magic, 4) == 0) {
697 /* Control frame, to daemon */
724 struct sock *sk = sk_atm(vcc); 698 struct sock *sk = sk_atm(vcc);
725 699
726 pr_debug("%s: To daemon\n", dev->name); 700 pr_debug("%s: To daemon\n", dev->name);
@@ -778,9 +752,8 @@ static void lec_push(struct atm_vcc *vcc, struct sk_buff *skb)
778 dev_kfree_skb(skb); 752 dev_kfree_skb(skb);
779 return; 753 return;
780 } 754 }
781 if (!hlist_empty(&priv->lec_arp_empty_ones)) { 755 if (!hlist_empty(&priv->lec_arp_empty_ones))
782 lec_arp_check_empties(priv, vcc, skb); 756 lec_arp_check_empties(priv, vcc, skb);
783 }
784 skb_pull(skb, 2); /* skip lec_id */ 757 skb_pull(skb, 2); /* skip lec_id */
785#ifdef CONFIG_TR 758#ifdef CONFIG_TR
786 if (priv->is_trdev) 759 if (priv->is_trdev)
@@ -801,7 +774,7 @@ static void lec_pop(struct atm_vcc *vcc, struct sk_buff *skb)
801 struct net_device *dev = skb->dev; 774 struct net_device *dev = skb->dev;
802 775
803 if (vpriv == NULL) { 776 if (vpriv == NULL) {
804 printk("lec_pop(): vpriv = NULL!?!?!?\n"); 777 pr_info("vpriv = NULL!?!?!?\n");
805 return; 778 return;
806 } 779 }
807 780
@@ -822,15 +795,13 @@ static int lec_vcc_attach(struct atm_vcc *vcc, void __user *arg)
822 795
823 /* Lecd must be up in this case */ 796 /* Lecd must be up in this case */
824 bytes_left = copy_from_user(&ioc_data, arg, sizeof(struct atmlec_ioc)); 797 bytes_left = copy_from_user(&ioc_data, arg, sizeof(struct atmlec_ioc));
825 if (bytes_left != 0) { 798 if (bytes_left != 0)
826 printk 799 pr_info("copy from user failed for %d bytes\n", bytes_left);
827 ("lec: lec_vcc_attach, copy from user failed for %d bytes\n",
828 bytes_left);
829 }
830 if (ioc_data.dev_num < 0 || ioc_data.dev_num >= MAX_LEC_ITF || 800 if (ioc_data.dev_num < 0 || ioc_data.dev_num >= MAX_LEC_ITF ||
831 !dev_lec[ioc_data.dev_num]) 801 !dev_lec[ioc_data.dev_num])
832 return -EINVAL; 802 return -EINVAL;
833 if (!(vpriv = kmalloc(sizeof(struct lec_vcc_priv), GFP_KERNEL))) 803 vpriv = kmalloc(sizeof(struct lec_vcc_priv), GFP_KERNEL);
804 if (!vpriv)
834 return -ENOMEM; 805 return -ENOMEM;
835 vpriv->xoff = 0; 806 vpriv->xoff = 0;
836 vpriv->old_pop = vcc->pop; 807 vpriv->old_pop = vcc->pop;
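Worth noting for the lec_vcc_attach() hunk above: copy_from_user() returns the number of bytes it could not copy, not an errno, which is why the variable is named bytes_left and why the message reports a byte count. A minimal sketch of that convention (fetch_ioc_data is a hypothetical helper; the function in the patch only logs the failure and carries on):

	#include <linux/uaccess.h>
	#include <linux/atmlec.h>	/* struct atmlec_ioc */

	/* Returns 0 on success, -EFAULT if any part of the user buffer was bad. */
	static int fetch_ioc_data(struct atmlec_ioc *dst, const void __user *src)
	{
		unsigned long bytes_left = copy_from_user(dst, src, sizeof(*dst));

		if (bytes_left) {
			pr_info("copy from user failed for %lu bytes\n", bytes_left);
			return -EFAULT;
		}
		return 0;
	}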
@@ -921,9 +892,8 @@ static int lecd_attach(struct atm_vcc *vcc, int arg)
921 priv->flush_timeout = (4 * HZ); 892 priv->flush_timeout = (4 * HZ);
922 priv->path_switching_delay = (6 * HZ); 893 priv->path_switching_delay = (6 * HZ);
923 894
924 if (dev_lec[i]->flags & IFF_UP) { 895 if (dev_lec[i]->flags & IFF_UP)
925 netif_start_queue(dev_lec[i]); 896 netif_start_queue(dev_lec[i]);
926 }
927 __module_get(THIS_MODULE); 897 __module_get(THIS_MODULE);
928 return i; 898 return i;
929} 899}
@@ -1125,7 +1095,9 @@ static int lec_seq_show(struct seq_file *seq, void *v)
1125 else { 1095 else {
1126 struct lec_state *state = seq->private; 1096 struct lec_state *state = seq->private;
1127 struct net_device *dev = state->dev; 1097 struct net_device *dev = state->dev;
1128 struct lec_arp_table *entry = hlist_entry(state->node, struct lec_arp_table, next); 1098 struct lec_arp_table *entry = hlist_entry(state->node,
1099 struct lec_arp_table,
1100 next);
1129 1101
1130 seq_printf(seq, "%s ", dev->name); 1102 seq_printf(seq, "%s ", dev->name);
1131 lec_info(seq, entry); 1103 lec_info(seq, entry);
@@ -1199,13 +1171,13 @@ static int __init lane_module_init(void)
1199 1171
1200 p = proc_create("lec", S_IRUGO, atm_proc_root, &lec_seq_fops); 1172 p = proc_create("lec", S_IRUGO, atm_proc_root, &lec_seq_fops);
1201 if (!p) { 1173 if (!p) {
1202 printk(KERN_ERR "Unable to initialize /proc/net/atm/lec\n"); 1174 pr_err("Unable to initialize /proc/net/atm/lec\n");
1203 return -ENOMEM; 1175 return -ENOMEM;
1204 } 1176 }
1205#endif 1177#endif
1206 1178
1207 register_atm_ioctl(&lane_ioctl_ops); 1179 register_atm_ioctl(&lane_ioctl_ops);
1208 printk("lec.c: " __DATE__ " " __TIME__ " initialized\n"); 1180 pr_info("lec.c: " __DATE__ " " __TIME__ " initialized\n");
1209 return 0; 1181 return 0;
1210} 1182}
1211 1183
@@ -1294,13 +1266,13 @@ static int lane2_associate_req(struct net_device *dev, const u8 *lan_dst,
1294 struct lec_priv *priv = netdev_priv(dev); 1266 struct lec_priv *priv = netdev_priv(dev);
1295 1267
1296 if (compare_ether_addr(lan_dst, dev->dev_addr)) 1268 if (compare_ether_addr(lan_dst, dev->dev_addr))
1297 return (0); /* not our mac address */ 1269 return 0; /* not our mac address */
1298 1270
1299 kfree(priv->tlvs); /* NULL if there was no previous association */ 1271 kfree(priv->tlvs); /* NULL if there was no previous association */
1300 1272
1301 priv->tlvs = kmemdup(tlvs, sizeoftlvs, GFP_KERNEL); 1273 priv->tlvs = kmemdup(tlvs, sizeoftlvs, GFP_KERNEL);
1302 if (priv->tlvs == NULL) 1274 if (priv->tlvs == NULL)
1303 return (0); 1275 return 0;
1304 priv->sizeoftlvs = sizeoftlvs; 1276 priv->sizeoftlvs = sizeoftlvs;
1305 1277
1306 skb = alloc_skb(sizeoftlvs, GFP_ATOMIC); 1278 skb = alloc_skb(sizeoftlvs, GFP_ATOMIC);
@@ -1310,12 +1282,12 @@ static int lane2_associate_req(struct net_device *dev, const u8 *lan_dst,
1310 skb_copy_to_linear_data(skb, tlvs, sizeoftlvs); 1282 skb_copy_to_linear_data(skb, tlvs, sizeoftlvs);
1311 retval = send_to_lecd(priv, l_associate_req, NULL, NULL, skb); 1283 retval = send_to_lecd(priv, l_associate_req, NULL, NULL, skb);
1312 if (retval != 0) 1284 if (retval != 0)
1313 printk("lec.c: lane2_associate_req() failed\n"); 1285 pr_info("lec.c: lane2_associate_req() failed\n");
1314 /* 1286 /*
1315 * If the previous association has changed we must 1287 * If the previous association has changed we must
1316 * somehow notify other LANE entities about the change 1288 * somehow notify other LANE entities about the change
1317 */ 1289 */
1318 return (1); 1290 return 1;
1319} 1291}
1320 1292
1321/* 1293/*
@@ -1348,12 +1320,12 @@ static void lane2_associate_ind(struct net_device *dev, const u8 *mac_addr,
1348 entry->sizeoftlvs = sizeoftlvs; 1320 entry->sizeoftlvs = sizeoftlvs;
1349#endif 1321#endif
1350#if 0 1322#if 0
1351 printk("lec.c: lane2_associate_ind()\n"); 1323 pr_info("\n");
1352 printk("dump of tlvs, sizeoftlvs=%d\n", sizeoftlvs); 1324 pr_info("dump of tlvs, sizeoftlvs=%d\n", sizeoftlvs);
1353 while (i < sizeoftlvs) 1325 while (i < sizeoftlvs)
1354 printk("%02x ", tlvs[i++]); 1326 pr_cont("%02x ", tlvs[i++]);
1355 1327
1356 printk("\n"); 1328 pr_cont("\n");
1357#endif 1329#endif
1358 1330
1359 /* tell MPOA about the TLVs we saw */ 1331 /* tell MPOA about the TLVs we saw */
@@ -1373,15 +1345,15 @@ static void lane2_associate_ind(struct net_device *dev, const u8 *mac_addr,
1373 1345
1374#include <linux/types.h> 1346#include <linux/types.h>
1375#include <linux/timer.h> 1347#include <linux/timer.h>
1376#include <asm/param.h> 1348#include <linux/param.h>
1377#include <asm/atomic.h> 1349#include <asm/atomic.h>
1378#include <linux/inetdevice.h> 1350#include <linux/inetdevice.h>
1379#include <net/route.h> 1351#include <net/route.h>
1380 1352
1381#if 0 1353#if 0
1382#define pr_debug(format,args...) 1354#define pr_debug(format, args...)
1383/* 1355/*
1384#define pr_debug printk 1356 #define pr_debug printk
1385*/ 1357*/
1386#endif 1358#endif
1387#define DEBUG_ARP_TABLE 0 1359#define DEBUG_ARP_TABLE 0
@@ -1395,7 +1367,7 @@ static void lec_arp_expire_arp(unsigned long data);
1395 * Arp table funcs 1367 * Arp table funcs
1396 */ 1368 */
1397 1369
1398#define HASH(ch) (ch & (LEC_ARP_TABLE_SIZE -1)) 1370#define HASH(ch) (ch & (LEC_ARP_TABLE_SIZE - 1))
1399 1371
1400/* 1372/*
1401 * Initialization of arp-cache 1373 * Initialization of arp-cache
@@ -1404,9 +1376,8 @@ static void lec_arp_init(struct lec_priv *priv)
1404{ 1376{
1405 unsigned short i; 1377 unsigned short i;
1406 1378
1407 for (i = 0; i < LEC_ARP_TABLE_SIZE; i++) { 1379 for (i = 0; i < LEC_ARP_TABLE_SIZE; i++)
1408 INIT_HLIST_HEAD(&priv->lec_arp_tables[i]); 1380 INIT_HLIST_HEAD(&priv->lec_arp_tables[i]);
1409 }
1410 INIT_HLIST_HEAD(&priv->lec_arp_empty_ones); 1381 INIT_HLIST_HEAD(&priv->lec_arp_empty_ones);
1411 INIT_HLIST_HEAD(&priv->lec_no_forward); 1382 INIT_HLIST_HEAD(&priv->lec_no_forward);
1412 INIT_HLIST_HEAD(&priv->mcast_fwds); 1383 INIT_HLIST_HEAD(&priv->mcast_fwds);
@@ -1450,10 +1421,7 @@ lec_arp_add(struct lec_priv *priv, struct lec_arp_table *entry)
1450 tmp = &priv->lec_arp_tables[HASH(entry->mac_addr[ETH_ALEN - 1])]; 1421 tmp = &priv->lec_arp_tables[HASH(entry->mac_addr[ETH_ALEN - 1])];
1451 hlist_add_head(&entry->next, tmp); 1422 hlist_add_head(&entry->next, tmp);
1452 1423
1453 pr_debug("LEC_ARP: Added entry:%2.2x %2.2x %2.2x %2.2x %2.2x %2.2x\n", 1424 pr_debug("Added entry:%pM\n", entry->mac_addr);
1454 0xff & entry->mac_addr[0], 0xff & entry->mac_addr[1],
1455 0xff & entry->mac_addr[2], 0xff & entry->mac_addr[3],
1456 0xff & entry->mac_addr[4], 0xff & entry->mac_addr[5]);
1457} 1425}
1458 1426
1459/* 1427/*
@@ -1466,20 +1434,23 @@ lec_arp_remove(struct lec_priv *priv, struct lec_arp_table *to_remove)
1466 struct lec_arp_table *entry; 1434 struct lec_arp_table *entry;
1467 int i, remove_vcc = 1; 1435 int i, remove_vcc = 1;
1468 1436
1469 if (!to_remove) { 1437 if (!to_remove)
1470 return -1; 1438 return -1;
1471 }
1472 1439
1473 hlist_del(&to_remove->next); 1440 hlist_del(&to_remove->next);
1474 del_timer(&to_remove->timer); 1441 del_timer(&to_remove->timer);
1475 1442
1476 /* If this is the only MAC connected to this VCC, also tear down the VCC */ 1443 /*
1444 * If this is the only MAC connected to this VCC,
1445 * also tear down the VCC
1446 */
1477 if (to_remove->status >= ESI_FLUSH_PENDING) { 1447 if (to_remove->status >= ESI_FLUSH_PENDING) {
1478 /* 1448 /*
1479 * ESI_FLUSH_PENDING, ESI_FORWARD_DIRECT 1449 * ESI_FLUSH_PENDING, ESI_FORWARD_DIRECT
1480 */ 1450 */
1481 for (i = 0; i < LEC_ARP_TABLE_SIZE; i++) { 1451 for (i = 0; i < LEC_ARP_TABLE_SIZE; i++) {
1482 hlist_for_each_entry(entry, node, &priv->lec_arp_tables[i], next) { 1452 hlist_for_each_entry(entry, node,
1453 &priv->lec_arp_tables[i], next) {
1483 if (memcmp(to_remove->atm_addr, 1454 if (memcmp(to_remove->atm_addr,
1484 entry->atm_addr, ATM_ESA_LEN) == 0) { 1455 entry->atm_addr, ATM_ESA_LEN) == 0) {
1485 remove_vcc = 0; 1456 remove_vcc = 0;
@@ -1492,10 +1463,7 @@ lec_arp_remove(struct lec_priv *priv, struct lec_arp_table *to_remove)
1492 } 1463 }
1493 skb_queue_purge(&to_remove->tx_wait); /* FIXME: good place for this? */ 1464 skb_queue_purge(&to_remove->tx_wait); /* FIXME: good place for this? */
1494 1465
1495 pr_debug("LEC_ARP: Removed entry:%2.2x %2.2x %2.2x %2.2x %2.2x %2.2x\n", 1466 pr_debug("Removed entry:%pM\n", to_remove->mac_addr);
1496 0xff & to_remove->mac_addr[0], 0xff & to_remove->mac_addr[1],
1497 0xff & to_remove->mac_addr[2], 0xff & to_remove->mac_addr[3],
1498 0xff & to_remove->mac_addr[4], 0xff & to_remove->mac_addr[5]);
1499 return 0; 1467 return 0;
1500} 1468}
1501 1469
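Several hunks in this patch (lec_arp_add, lec_arp_remove, dump_arp_table, lec_arp_find, lec_arp_update) collapse the six per-byte "%2.2x" conversions into the %pM printk extension, which prints a 6-byte MAC address in the usual colon-separated form. A one-function sketch of the idiom (show_mac is a hypothetical helper, not from the patch):

	#include <linux/types.h>
	#include <linux/if_ether.h>	/* ETH_ALEN */
	#include <linux/kernel.h>	/* pr_debug() */

	static void show_mac(const u8 *mac)
	{
		/* %pM expands a 6-byte buffer to "xx:xx:xx:xx:xx:xx" */
		pr_debug("Added entry:%pM\n", mac);
	}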
@@ -1513,9 +1481,8 @@ static const char *get_status_string(unsigned char st)
1513 return "ESI_FLUSH_PENDING"; 1481 return "ESI_FLUSH_PENDING";
1514 case ESI_FORWARD_DIRECT: 1482 case ESI_FORWARD_DIRECT:
1515 return "ESI_FORWARD_DIRECT"; 1483 return "ESI_FORWARD_DIRECT";
1516 default:
1517 return "<UNKNOWN>";
1518 } 1484 }
1485 return "<UNKNOWN>";
1519} 1486}
1520 1487
1521static void dump_arp_table(struct lec_priv *priv) 1488static void dump_arp_table(struct lec_priv *priv)
@@ -1525,18 +1492,15 @@ static void dump_arp_table(struct lec_priv *priv)
1525 char buf[256]; 1492 char buf[256];
1526 int i, j, offset; 1493 int i, j, offset;
1527 1494
1528 printk("Dump %p:\n", priv); 1495 pr_info("Dump %p:\n", priv);
1529 for (i = 0; i < LEC_ARP_TABLE_SIZE; i++) { 1496 for (i = 0; i < LEC_ARP_TABLE_SIZE; i++) {
1530 hlist_for_each_entry(rulla, node, &priv->lec_arp_tables[i], next) { 1497 hlist_for_each_entry(rulla, node,
1498 &priv->lec_arp_tables[i], next) {
1531 offset = 0; 1499 offset = 0;
1532 offset += sprintf(buf, "%d: %p\n", i, rulla); 1500 offset += sprintf(buf, "%d: %p\n", i, rulla);
1533 offset += sprintf(buf + offset, "Mac:"); 1501 offset += sprintf(buf + offset, "Mac: %pM",
1534 for (j = 0; j < ETH_ALEN; j++) { 1502 rulla->mac_addr);
1535 offset += sprintf(buf + offset, 1503 offset += sprintf(buf + offset, " Atm:");
1536 "%2.2x ",
1537 rulla->mac_addr[j] & 0xff);
1538 }
1539 offset += sprintf(buf + offset, "Atm:");
1540 for (j = 0; j < ATM_ESA_LEN; j++) { 1504 for (j = 0; j < ATM_ESA_LEN; j++) {
1541 offset += sprintf(buf + offset, 1505 offset += sprintf(buf + offset,
1542 "%2.2x ", 1506 "%2.2x ",
@@ -1556,20 +1520,16 @@ static void dump_arp_table(struct lec_priv *priv)
1556 "Flags:%x, Packets_flooded:%x, Status: %s ", 1520 "Flags:%x, Packets_flooded:%x, Status: %s ",
1557 rulla->flags, rulla->packets_flooded, 1521 rulla->flags, rulla->packets_flooded,
1558 get_status_string(rulla->status)); 1522 get_status_string(rulla->status));
1559 printk("%s\n", buf); 1523 pr_info("%s\n", buf);
1560 } 1524 }
1561 } 1525 }
1562 1526
1563 if (!hlist_empty(&priv->lec_no_forward)) 1527 if (!hlist_empty(&priv->lec_no_forward))
1564 printk("No forward\n"); 1528 pr_info("No forward\n");
1565 hlist_for_each_entry(rulla, node, &priv->lec_no_forward, next) { 1529 hlist_for_each_entry(rulla, node, &priv->lec_no_forward, next) {
1566 offset = 0; 1530 offset = 0;
1567 offset += sprintf(buf + offset, "Mac:"); 1531 offset += sprintf(buf + offset, "Mac: %pM", rulla->mac_addr);
1568 for (j = 0; j < ETH_ALEN; j++) { 1532 offset += sprintf(buf + offset, " Atm:");
1569 offset += sprintf(buf + offset, "%2.2x ",
1570 rulla->mac_addr[j] & 0xff);
1571 }
1572 offset += sprintf(buf + offset, "Atm:");
1573 for (j = 0; j < ATM_ESA_LEN; j++) { 1533 for (j = 0; j < ATM_ESA_LEN; j++) {
1574 offset += sprintf(buf + offset, "%2.2x ", 1534 offset += sprintf(buf + offset, "%2.2x ",
1575 rulla->atm_addr[j] & 0xff); 1535 rulla->atm_addr[j] & 0xff);
@@ -1586,19 +1546,15 @@ static void dump_arp_table(struct lec_priv *priv)
1586 "Flags:%x, Packets_flooded:%x, Status: %s ", 1546 "Flags:%x, Packets_flooded:%x, Status: %s ",
1587 rulla->flags, rulla->packets_flooded, 1547 rulla->flags, rulla->packets_flooded,
1588 get_status_string(rulla->status)); 1548 get_status_string(rulla->status));
1589 printk("%s\n", buf); 1549 pr_info("%s\n", buf);
1590 } 1550 }
1591 1551
1592 if (!hlist_empty(&priv->lec_arp_empty_ones)) 1552 if (!hlist_empty(&priv->lec_arp_empty_ones))
1593 printk("Empty ones\n"); 1553 pr_info("Empty ones\n");
1594 hlist_for_each_entry(rulla, node, &priv->lec_arp_empty_ones, next) { 1554 hlist_for_each_entry(rulla, node, &priv->lec_arp_empty_ones, next) {
1595 offset = 0; 1555 offset = 0;
1596 offset += sprintf(buf + offset, "Mac:"); 1556 offset += sprintf(buf + offset, "Mac: %pM", rulla->mac_addr);
1597 for (j = 0; j < ETH_ALEN; j++) { 1557 offset += sprintf(buf + offset, " Atm:");
1598 offset += sprintf(buf + offset, "%2.2x ",
1599 rulla->mac_addr[j] & 0xff);
1600 }
1601 offset += sprintf(buf + offset, "Atm:");
1602 for (j = 0; j < ATM_ESA_LEN; j++) { 1558 for (j = 0; j < ATM_ESA_LEN; j++) {
1603 offset += sprintf(buf + offset, "%2.2x ", 1559 offset += sprintf(buf + offset, "%2.2x ",
1604 rulla->atm_addr[j] & 0xff); 1560 rulla->atm_addr[j] & 0xff);
@@ -1615,19 +1571,15 @@ static void dump_arp_table(struct lec_priv *priv)
1615 "Flags:%x, Packets_flooded:%x, Status: %s ", 1571 "Flags:%x, Packets_flooded:%x, Status: %s ",
1616 rulla->flags, rulla->packets_flooded, 1572 rulla->flags, rulla->packets_flooded,
1617 get_status_string(rulla->status)); 1573 get_status_string(rulla->status));
1618 printk("%s", buf); 1574 pr_info("%s", buf);
1619 } 1575 }
1620 1576
1621 if (!hlist_empty(&priv->mcast_fwds)) 1577 if (!hlist_empty(&priv->mcast_fwds))
1622 printk("Multicast Forward VCCs\n"); 1578 pr_info("Multicast Forward VCCs\n");
1623 hlist_for_each_entry(rulla, node, &priv->mcast_fwds, next) { 1579 hlist_for_each_entry(rulla, node, &priv->mcast_fwds, next) {
1624 offset = 0; 1580 offset = 0;
1625 offset += sprintf(buf + offset, "Mac:"); 1581 offset += sprintf(buf + offset, "Mac: %pM", rulla->mac_addr);
1626 for (j = 0; j < ETH_ALEN; j++) { 1582 offset += sprintf(buf + offset, " Atm:");
1627 offset += sprintf(buf + offset, "%2.2x ",
1628 rulla->mac_addr[j] & 0xff);
1629 }
1630 offset += sprintf(buf + offset, "Atm:");
1631 for (j = 0; j < ATM_ESA_LEN; j++) { 1583 for (j = 0; j < ATM_ESA_LEN; j++) {
1632 offset += sprintf(buf + offset, "%2.2x ", 1584 offset += sprintf(buf + offset, "%2.2x ",
1633 rulla->atm_addr[j] & 0xff); 1585 rulla->atm_addr[j] & 0xff);
@@ -1644,7 +1596,7 @@ static void dump_arp_table(struct lec_priv *priv)
1644 "Flags:%x, Packets_flooded:%x, Status: %s ", 1596 "Flags:%x, Packets_flooded:%x, Status: %s ",
1645 rulla->flags, rulla->packets_flooded, 1597 rulla->flags, rulla->packets_flooded,
1646 get_status_string(rulla->status)); 1598 get_status_string(rulla->status));
1647 printk("%s\n", buf); 1599 pr_info("%s\n", buf);
1648 } 1600 }
1649 1601
1650} 1602}
@@ -1670,14 +1622,16 @@ static void lec_arp_destroy(struct lec_priv *priv)
1670 1622
1671 spin_lock_irqsave(&priv->lec_arp_lock, flags); 1623 spin_lock_irqsave(&priv->lec_arp_lock, flags);
1672 for (i = 0; i < LEC_ARP_TABLE_SIZE; i++) { 1624 for (i = 0; i < LEC_ARP_TABLE_SIZE; i++) {
1673 hlist_for_each_entry_safe(entry, node, next, &priv->lec_arp_tables[i], next) { 1625 hlist_for_each_entry_safe(entry, node, next,
1626 &priv->lec_arp_tables[i], next) {
1674 lec_arp_remove(priv, entry); 1627 lec_arp_remove(priv, entry);
1675 lec_arp_put(entry); 1628 lec_arp_put(entry);
1676 } 1629 }
1677 INIT_HLIST_HEAD(&priv->lec_arp_tables[i]); 1630 INIT_HLIST_HEAD(&priv->lec_arp_tables[i]);
1678 } 1631 }
1679 1632
1680 hlist_for_each_entry_safe(entry, node, next, &priv->lec_arp_empty_ones, next) { 1633 hlist_for_each_entry_safe(entry, node, next,
1634 &priv->lec_arp_empty_ones, next) {
1681 del_timer_sync(&entry->timer); 1635 del_timer_sync(&entry->timer);
1682 lec_arp_clear_vccs(entry); 1636 lec_arp_clear_vccs(entry);
1683 hlist_del(&entry->next); 1637 hlist_del(&entry->next);
@@ -1685,7 +1639,8 @@ static void lec_arp_destroy(struct lec_priv *priv)
1685 } 1639 }
1686 INIT_HLIST_HEAD(&priv->lec_arp_empty_ones); 1640 INIT_HLIST_HEAD(&priv->lec_arp_empty_ones);
1687 1641
1688 hlist_for_each_entry_safe(entry, node, next, &priv->lec_no_forward, next) { 1642 hlist_for_each_entry_safe(entry, node, next,
1643 &priv->lec_no_forward, next) {
1689 del_timer_sync(&entry->timer); 1644 del_timer_sync(&entry->timer);
1690 lec_arp_clear_vccs(entry); 1645 lec_arp_clear_vccs(entry);
1691 hlist_del(&entry->next); 1646 hlist_del(&entry->next);
@@ -1714,15 +1669,12 @@ static struct lec_arp_table *lec_arp_find(struct lec_priv *priv,
1714 struct hlist_head *head; 1669 struct hlist_head *head;
1715 struct lec_arp_table *entry; 1670 struct lec_arp_table *entry;
1716 1671
1717 pr_debug("LEC_ARP: lec_arp_find :%2.2x %2.2x %2.2x %2.2x %2.2x %2.2x\n", 1672 pr_debug("%pM\n", mac_addr);
1718 mac_addr[0] & 0xff, mac_addr[1] & 0xff, mac_addr[2] & 0xff,
1719 mac_addr[3] & 0xff, mac_addr[4] & 0xff, mac_addr[5] & 0xff);
1720 1673
1721 head = &priv->lec_arp_tables[HASH(mac_addr[ETH_ALEN - 1])]; 1674 head = &priv->lec_arp_tables[HASH(mac_addr[ETH_ALEN - 1])];
1722 hlist_for_each_entry(entry, node, head, next) { 1675 hlist_for_each_entry(entry, node, head, next) {
1723 if (!compare_ether_addr(mac_addr, entry->mac_addr)) { 1676 if (!compare_ether_addr(mac_addr, entry->mac_addr))
1724 return entry; 1677 return entry;
1725 }
1726 } 1678 }
1727 return NULL; 1679 return NULL;
1728} 1680}
@@ -1734,7 +1686,7 @@ static struct lec_arp_table *make_entry(struct lec_priv *priv,
1734 1686
1735 to_return = kzalloc(sizeof(struct lec_arp_table), GFP_ATOMIC); 1687 to_return = kzalloc(sizeof(struct lec_arp_table), GFP_ATOMIC);
1736 if (!to_return) { 1688 if (!to_return) {
1737 printk("LEC: Arp entry kmalloc failed\n"); 1689 pr_info("LEC: Arp entry kmalloc failed\n");
1738 return NULL; 1690 return NULL;
1739 } 1691 }
1740 memcpy(to_return->mac_addr, mac_addr, ETH_ALEN); 1692 memcpy(to_return->mac_addr, mac_addr, ETH_ALEN);
@@ -1755,7 +1707,7 @@ static void lec_arp_expire_arp(unsigned long data)
1755 1707
1756 entry = (struct lec_arp_table *)data; 1708 entry = (struct lec_arp_table *)data;
1757 1709
1758 pr_debug("lec_arp_expire_arp\n"); 1710 pr_debug("\n");
1759 if (entry->status == ESI_ARP_PENDING) { 1711 if (entry->status == ESI_ARP_PENDING) {
1760 if (entry->no_tries <= entry->priv->max_retry_count) { 1712 if (entry->no_tries <= entry->priv->max_retry_count) {
1761 if (entry->is_rdesc) 1713 if (entry->is_rdesc)
@@ -1779,10 +1731,10 @@ static void lec_arp_expire_vcc(unsigned long data)
1779 1731
1780 del_timer(&to_remove->timer); 1732 del_timer(&to_remove->timer);
1781 1733
1782 pr_debug("LEC_ARP %p %p: lec_arp_expire_vcc vpi:%d vci:%d\n", 1734 pr_debug("%p %p: vpi:%d vci:%d\n",
1783 to_remove, priv, 1735 to_remove, priv,
1784 to_remove->vcc ? to_remove->recv_vcc->vpi : 0, 1736 to_remove->vcc ? to_remove->recv_vcc->vpi : 0,
1785 to_remove->vcc ? to_remove->recv_vcc->vci : 0); 1737 to_remove->vcc ? to_remove->recv_vcc->vci : 0);
1786 1738
1787 spin_lock_irqsave(&priv->lec_arp_lock, flags); 1739 spin_lock_irqsave(&priv->lec_arp_lock, flags);
1788 hlist_del(&to_remove->next); 1740 hlist_del(&to_remove->next);
@@ -1792,6 +1744,50 @@ static void lec_arp_expire_vcc(unsigned long data)
1792 lec_arp_put(to_remove); 1744 lec_arp_put(to_remove);
1793} 1745}
1794 1746
1747static bool __lec_arp_check_expire(struct lec_arp_table *entry,
1748 unsigned long now,
1749 struct lec_priv *priv)
1750{
1751 unsigned long time_to_check;
1752
1753 if ((entry->flags) & LEC_REMOTE_FLAG && priv->topology_change)
1754 time_to_check = priv->forward_delay_time;
1755 else
1756 time_to_check = priv->aging_time;
1757
1758 pr_debug("About to expire: %lx - %lx > %lx\n",
1759 now, entry->last_used, time_to_check);
1760 if (time_after(now, entry->last_used + time_to_check) &&
1761 !(entry->flags & LEC_PERMANENT_FLAG) &&
1762 !(entry->mac_addr[0] & 0x01)) { /* LANE2: 7.1.20 */
1763 /* Remove entry */
1764 pr_debug("Entry timed out\n");
1765 lec_arp_remove(priv, entry);
1766 lec_arp_put(entry);
1767 } else {
1768 /* Something else */
1769 if ((entry->status == ESI_VC_PENDING ||
1770 entry->status == ESI_ARP_PENDING) &&
1771 time_after_eq(now, entry->timestamp +
1772 priv->max_unknown_frame_time)) {
1773 entry->timestamp = jiffies;
1774 entry->packets_flooded = 0;
1775 if (entry->status == ESI_VC_PENDING)
1776 send_to_lecd(priv, l_svc_setup,
1777 entry->mac_addr,
1778 entry->atm_addr,
1779 NULL);
1780 }
1781 if (entry->status == ESI_FLUSH_PENDING &&
1782 time_after_eq(now, entry->timestamp +
1783 priv->path_switching_delay)) {
1784 lec_arp_hold(entry);
1785 return true;
1786 }
1787 }
1788
1789 return false;
1790}
1795/* 1791/*
1796 * Expire entries. 1792 * Expire entries.
1797 * 1. Re-set timer 1793 * 1. Re-set timer
@@ -1816,62 +1812,28 @@ static void lec_arp_check_expire(struct work_struct *work)
1816 struct hlist_node *node, *next; 1812 struct hlist_node *node, *next;
1817 struct lec_arp_table *entry; 1813 struct lec_arp_table *entry;
1818 unsigned long now; 1814 unsigned long now;
1819 unsigned long time_to_check;
1820 int i; 1815 int i;
1821 1816
1822 pr_debug("lec_arp_check_expire %p\n", priv); 1817 pr_debug("%p\n", priv);
1823 now = jiffies; 1818 now = jiffies;
1824restart: 1819restart:
1825 spin_lock_irqsave(&priv->lec_arp_lock, flags); 1820 spin_lock_irqsave(&priv->lec_arp_lock, flags);
1826 for (i = 0; i < LEC_ARP_TABLE_SIZE; i++) { 1821 for (i = 0; i < LEC_ARP_TABLE_SIZE; i++) {
1827 hlist_for_each_entry_safe(entry, node, next, &priv->lec_arp_tables[i], next) { 1822 hlist_for_each_entry_safe(entry, node, next,
1828 if ((entry->flags) & LEC_REMOTE_FLAG && 1823 &priv->lec_arp_tables[i], next) {
1829 priv->topology_change) 1824 if (__lec_arp_check_expire(entry, now, priv)) {
1830 time_to_check = priv->forward_delay_time; 1825 struct sk_buff *skb;
1831 else 1826 struct atm_vcc *vcc = entry->vcc;
1832 time_to_check = priv->aging_time; 1827
1833 1828 spin_unlock_irqrestore(&priv->lec_arp_lock,
1834 pr_debug("About to expire: %lx - %lx > %lx\n", 1829 flags);
1835 now, entry->last_used, time_to_check); 1830 while ((skb = skb_dequeue(&entry->tx_wait)))
1836 if (time_after(now, entry->last_used + time_to_check) 1831 lec_send(vcc, skb);
1837 && !(entry->flags & LEC_PERMANENT_FLAG) 1832 entry->last_used = jiffies;
1838 && !(entry->mac_addr[0] & 0x01)) { /* LANE2: 7.1.20 */ 1833 entry->status = ESI_FORWARD_DIRECT;
1839 /* Remove entry */
1840 pr_debug("LEC:Entry timed out\n");
1841 lec_arp_remove(priv, entry);
1842 lec_arp_put(entry); 1834 lec_arp_put(entry);
1843 } else { 1835
1844 /* Something else */ 1836 goto restart;
1845 if ((entry->status == ESI_VC_PENDING ||
1846 entry->status == ESI_ARP_PENDING)
1847 && time_after_eq(now,
1848 entry->timestamp +
1849 priv->
1850 max_unknown_frame_time)) {
1851 entry->timestamp = jiffies;
1852 entry->packets_flooded = 0;
1853 if (entry->status == ESI_VC_PENDING)
1854 send_to_lecd(priv, l_svc_setup,
1855 entry->mac_addr,
1856 entry->atm_addr,
1857 NULL);
1858 }
1859 if (entry->status == ESI_FLUSH_PENDING
1860 &&
1861 time_after_eq(now, entry->timestamp +
1862 priv->path_switching_delay)) {
1863 struct sk_buff *skb;
1864 struct atm_vcc *vcc = entry->vcc;
1865
1866 lec_arp_hold(entry);
1867 spin_unlock_irqrestore(&priv->lec_arp_lock, flags);
1868 while ((skb = skb_dequeue(&entry->tx_wait)) != NULL)
1869 lec_send(vcc, skb);
1870 entry->last_used = jiffies;
1871 entry->status = ESI_FORWARD_DIRECT;
1872 lec_arp_put(entry);
1873 goto restart;
1874 }
1875 } 1837 }
1876 } 1838 }
1877 } 1839 }
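The two hunks above split the old in-line aging logic out of lec_arp_check_expire(): __lec_arp_check_expire() now does the per-entry bookkeeping under lec_arp_lock and returns true only when a flush-pending entry has to be drained, at which point the caller drops the lock, flushes the queued frames, and restarts the table walk, since the hlist iteration cannot safely continue once the lock has been released. A condensed sketch of that caller shape, with the LEC-specific details stubbed out (drain_tx_wait is a hypothetical stand-in for the skb_dequeue()/lec_send() loop and the status update in the hunk):

	static void check_expire_sketch(struct lec_priv *priv)
	{
		struct hlist_node *node, *next;
		struct lec_arp_table *entry;
		unsigned long flags;
		unsigned long now = jiffies;
		int i;

	restart:
		spin_lock_irqsave(&priv->lec_arp_lock, flags);
		for (i = 0; i < LEC_ARP_TABLE_SIZE; i++) {
			hlist_for_each_entry_safe(entry, node, next,
						  &priv->lec_arp_tables[i], next) {
				if (!__lec_arp_check_expire(entry, now, priv))
					continue;	/* nothing to drain, keep walking */

				/* entry was grabbed with lec_arp_hold(); safe to unlock */
				spin_unlock_irqrestore(&priv->lec_arp_lock, flags);
				drain_tx_wait(entry);
				lec_arp_put(entry);
				goto restart;		/* list may have changed, start over */
			}
		}
		spin_unlock_irqrestore(&priv->lec_arp_lock, flags);
	}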
@@ -1885,7 +1847,8 @@ restart:
1885 * 1847 *
1886 */ 1848 */
1887static struct atm_vcc *lec_arp_resolve(struct lec_priv *priv, 1849static struct atm_vcc *lec_arp_resolve(struct lec_priv *priv,
1888 const unsigned char *mac_to_find, int is_rdesc, 1850 const unsigned char *mac_to_find,
1851 int is_rdesc,
1889 struct lec_arp_table **ret_entry) 1852 struct lec_arp_table **ret_entry)
1890{ 1853{
1891 unsigned long flags; 1854 unsigned long flags;
@@ -1921,9 +1884,8 @@ static struct atm_vcc *lec_arp_resolve(struct lec_priv *priv,
1921 * If the LE_ARP cache entry is still pending, reset count to 0 1884 * If the LE_ARP cache entry is still pending, reset count to 0
1922 * so another LE_ARP request can be made for this frame. 1885 * so another LE_ARP request can be made for this frame.
1923 */ 1886 */
1924 if (entry->status == ESI_ARP_PENDING) { 1887 if (entry->status == ESI_ARP_PENDING)
1925 entry->no_tries = 0; 1888 entry->no_tries = 0;
1926 }
1927 /* 1889 /*
1928 * Data direct VC not yet set up, check to see if the unknown 1890 * Data direct VC not yet set up, check to see if the unknown
1929 * frame count is greater than the limit. If the limit has 1891 * frame count is greater than the limit. If the limit has
@@ -1934,7 +1896,7 @@ static struct atm_vcc *lec_arp_resolve(struct lec_priv *priv,
1934 entry->packets_flooded < 1896 entry->packets_flooded <
1935 priv->maximum_unknown_frame_count) { 1897 priv->maximum_unknown_frame_count) {
1936 entry->packets_flooded++; 1898 entry->packets_flooded++;
1937 pr_debug("LEC_ARP: Flooding..\n"); 1899 pr_debug("Flooding..\n");
1938 found = priv->mcast_vcc; 1900 found = priv->mcast_vcc;
1939 goto out; 1901 goto out;
1940 } 1902 }
@@ -1945,13 +1907,13 @@ static struct atm_vcc *lec_arp_resolve(struct lec_priv *priv,
1945 */ 1907 */
1946 lec_arp_hold(entry); 1908 lec_arp_hold(entry);
1947 *ret_entry = entry; 1909 *ret_entry = entry;
1948 pr_debug("lec: entry->status %d entry->vcc %p\n", entry->status, 1910 pr_debug("entry->status %d entry->vcc %p\n", entry->status,
1949 entry->vcc); 1911 entry->vcc);
1950 found = NULL; 1912 found = NULL;
1951 } else { 1913 } else {
1952 /* No matching entry was found */ 1914 /* No matching entry was found */
1953 entry = make_entry(priv, mac_to_find); 1915 entry = make_entry(priv, mac_to_find);
1954 pr_debug("LEC_ARP: Making entry\n"); 1916 pr_debug("Making entry\n");
1955 if (!entry) { 1917 if (!entry) {
1956 found = priv->mcast_vcc; 1918 found = priv->mcast_vcc;
1957 goto out; 1919 goto out;
@@ -1988,13 +1950,14 @@ lec_addr_delete(struct lec_priv *priv, const unsigned char *atm_addr,
1988 struct lec_arp_table *entry; 1950 struct lec_arp_table *entry;
1989 int i; 1951 int i;
1990 1952
1991 pr_debug("lec_addr_delete\n"); 1953 pr_debug("\n");
1992 spin_lock_irqsave(&priv->lec_arp_lock, flags); 1954 spin_lock_irqsave(&priv->lec_arp_lock, flags);
1993 for (i = 0; i < LEC_ARP_TABLE_SIZE; i++) { 1955 for (i = 0; i < LEC_ARP_TABLE_SIZE; i++) {
1994 hlist_for_each_entry_safe(entry, node, next, &priv->lec_arp_tables[i], next) { 1956 hlist_for_each_entry_safe(entry, node, next,
1995 if (!memcmp(atm_addr, entry->atm_addr, ATM_ESA_LEN) 1957 &priv->lec_arp_tables[i], next) {
1996 && (permanent || 1958 if (!memcmp(atm_addr, entry->atm_addr, ATM_ESA_LEN) &&
1997 !(entry->flags & LEC_PERMANENT_FLAG))) { 1959 (permanent ||
1960 !(entry->flags & LEC_PERMANENT_FLAG))) {
1998 lec_arp_remove(priv, entry); 1961 lec_arp_remove(priv, entry);
1999 lec_arp_put(entry); 1962 lec_arp_put(entry);
2000 } 1963 }
@@ -2019,10 +1982,8 @@ lec_arp_update(struct lec_priv *priv, const unsigned char *mac_addr,
2019 struct lec_arp_table *entry, *tmp; 1982 struct lec_arp_table *entry, *tmp;
2020 int i; 1983 int i;
2021 1984
2022 pr_debug("lec:%s", (targetless_le_arp) ? "targetless " : " "); 1985 pr_debug("%smac:%pM\n",
2023 pr_debug("lec_arp_update mac:%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x\n", 1986 (targetless_le_arp) ? "targetless " : "", mac_addr);
2024 mac_addr[0], mac_addr[1], mac_addr[2], mac_addr[3],
2025 mac_addr[4], mac_addr[5]);
2026 1987
2027 spin_lock_irqsave(&priv->lec_arp_lock, flags); 1988 spin_lock_irqsave(&priv->lec_arp_lock, flags);
2028 entry = lec_arp_find(priv, mac_addr); 1989 entry = lec_arp_find(priv, mac_addr);
@@ -2032,7 +1993,8 @@ lec_arp_update(struct lec_priv *priv, const unsigned char *mac_addr,
2032 * we have no entry in the cache. 7.1.30 1993 * we have no entry in the cache. 7.1.30
2033 */ 1994 */
2034 if (!hlist_empty(&priv->lec_arp_empty_ones)) { 1995 if (!hlist_empty(&priv->lec_arp_empty_ones)) {
2035 hlist_for_each_entry_safe(entry, node, next, &priv->lec_arp_empty_ones, next) { 1996 hlist_for_each_entry_safe(entry, node, next,
1997 &priv->lec_arp_empty_ones, next) {
2036 if (memcmp(entry->atm_addr, atm_addr, ATM_ESA_LEN) == 0) { 1998 if (memcmp(entry->atm_addr, atm_addr, ATM_ESA_LEN) == 0) {
2037 hlist_del(&entry->next); 1999 hlist_del(&entry->next);
2038 del_timer(&entry->timer); 2000 del_timer(&entry->timer);
@@ -2076,7 +2038,8 @@ lec_arp_update(struct lec_priv *priv, const unsigned char *mac_addr,
2076 memcpy(entry->atm_addr, atm_addr, ATM_ESA_LEN); 2038 memcpy(entry->atm_addr, atm_addr, ATM_ESA_LEN);
2077 del_timer(&entry->timer); 2039 del_timer(&entry->timer);
2078 for (i = 0; i < LEC_ARP_TABLE_SIZE; i++) { 2040 for (i = 0; i < LEC_ARP_TABLE_SIZE; i++) {
2079 hlist_for_each_entry(tmp, node, &priv->lec_arp_tables[i], next) { 2041 hlist_for_each_entry(tmp, node,
2042 &priv->lec_arp_tables[i], next) {
2080 if (entry != tmp && 2043 if (entry != tmp &&
2081 !memcmp(tmp->atm_addr, atm_addr, ATM_ESA_LEN)) { 2044 !memcmp(tmp->atm_addr, atm_addr, ATM_ESA_LEN)) {
2082 /* Vcc to this host exists */ 2045 /* Vcc to this host exists */
@@ -2121,14 +2084,13 @@ lec_vcc_added(struct lec_priv *priv, const struct atmlec_ioc *ioc_data,
2121 int i, found_entry = 0; 2084 int i, found_entry = 0;
2122 2085
2123 spin_lock_irqsave(&priv->lec_arp_lock, flags); 2086 spin_lock_irqsave(&priv->lec_arp_lock, flags);
2087 /* Vcc for Multicast Forward. No timer, LANEv2 7.1.20 and 2.3.5.3 */
2124 if (ioc_data->receive == 2) { 2088 if (ioc_data->receive == 2) {
2125 /* Vcc for Multicast Forward. No timer, LANEv2 7.1.20 and 2.3.5.3 */
2126
2127 pr_debug("LEC_ARP: Attaching mcast forward\n"); 2089 pr_debug("LEC_ARP: Attaching mcast forward\n");
2128#if 0 2090#if 0
2129 entry = lec_arp_find(priv, bus_mac); 2091 entry = lec_arp_find(priv, bus_mac);
2130 if (!entry) { 2092 if (!entry) {
2131 printk("LEC_ARP: Multicast entry not found!\n"); 2093 pr_info("LEC_ARP: Multicast entry not found!\n");
2132 goto out; 2094 goto out;
2133 } 2095 }
2134 memcpy(entry->atm_addr, ioc_data->atm_addr, ATM_ESA_LEN); 2096 memcpy(entry->atm_addr, ioc_data->atm_addr, ATM_ESA_LEN);
@@ -2149,19 +2111,17 @@ lec_vcc_added(struct lec_priv *priv, const struct atmlec_ioc *ioc_data,
2149 * Vcc which we don't want to make default vcc, 2111 * Vcc which we don't want to make default vcc,
2150 * attach it anyway. 2112 * attach it anyway.
2151 */ 2113 */
2152 pr_debug 2114 pr_debug("LEC_ARP:Attaching data direct, not default: %2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x\n",
2153 ("LEC_ARP:Attaching data direct, not default: " 2115 ioc_data->atm_addr[0], ioc_data->atm_addr[1],
2154 "%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x\n", 2116 ioc_data->atm_addr[2], ioc_data->atm_addr[3],
2155 ioc_data->atm_addr[0], ioc_data->atm_addr[1], 2117 ioc_data->atm_addr[4], ioc_data->atm_addr[5],
2156 ioc_data->atm_addr[2], ioc_data->atm_addr[3], 2118 ioc_data->atm_addr[6], ioc_data->atm_addr[7],
2157 ioc_data->atm_addr[4], ioc_data->atm_addr[5], 2119 ioc_data->atm_addr[8], ioc_data->atm_addr[9],
2158 ioc_data->atm_addr[6], ioc_data->atm_addr[7], 2120 ioc_data->atm_addr[10], ioc_data->atm_addr[11],
2159 ioc_data->atm_addr[8], ioc_data->atm_addr[9], 2121 ioc_data->atm_addr[12], ioc_data->atm_addr[13],
2160 ioc_data->atm_addr[10], ioc_data->atm_addr[11], 2122 ioc_data->atm_addr[14], ioc_data->atm_addr[15],
2161 ioc_data->atm_addr[12], ioc_data->atm_addr[13], 2123 ioc_data->atm_addr[16], ioc_data->atm_addr[17],
2162 ioc_data->atm_addr[14], ioc_data->atm_addr[15], 2124 ioc_data->atm_addr[18], ioc_data->atm_addr[19]);
2163 ioc_data->atm_addr[16], ioc_data->atm_addr[17],
2164 ioc_data->atm_addr[18], ioc_data->atm_addr[19]);
2165 entry = make_entry(priv, bus_mac); 2125 entry = make_entry(priv, bus_mac);
2166 if (entry == NULL) 2126 if (entry == NULL)
2167 goto out; 2127 goto out;
@@ -2177,29 +2137,28 @@ lec_vcc_added(struct lec_priv *priv, const struct atmlec_ioc *ioc_data,
2177 dump_arp_table(priv); 2137 dump_arp_table(priv);
2178 goto out; 2138 goto out;
2179 } 2139 }
2180 pr_debug 2140 pr_debug("LEC_ARP:Attaching data direct, default: %2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x\n",
2181 ("LEC_ARP:Attaching data direct, default: " 2141 ioc_data->atm_addr[0], ioc_data->atm_addr[1],
2182 "%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x\n", 2142 ioc_data->atm_addr[2], ioc_data->atm_addr[3],
2183 ioc_data->atm_addr[0], ioc_data->atm_addr[1], 2143 ioc_data->atm_addr[4], ioc_data->atm_addr[5],
2184 ioc_data->atm_addr[2], ioc_data->atm_addr[3], 2144 ioc_data->atm_addr[6], ioc_data->atm_addr[7],
2185 ioc_data->atm_addr[4], ioc_data->atm_addr[5], 2145 ioc_data->atm_addr[8], ioc_data->atm_addr[9],
2186 ioc_data->atm_addr[6], ioc_data->atm_addr[7], 2146 ioc_data->atm_addr[10], ioc_data->atm_addr[11],
2187 ioc_data->atm_addr[8], ioc_data->atm_addr[9], 2147 ioc_data->atm_addr[12], ioc_data->atm_addr[13],
2188 ioc_data->atm_addr[10], ioc_data->atm_addr[11], 2148 ioc_data->atm_addr[14], ioc_data->atm_addr[15],
2189 ioc_data->atm_addr[12], ioc_data->atm_addr[13], 2149 ioc_data->atm_addr[16], ioc_data->atm_addr[17],
2190 ioc_data->atm_addr[14], ioc_data->atm_addr[15], 2150 ioc_data->atm_addr[18], ioc_data->atm_addr[19]);
2191 ioc_data->atm_addr[16], ioc_data->atm_addr[17],
2192 ioc_data->atm_addr[18], ioc_data->atm_addr[19]);
2193 for (i = 0; i < LEC_ARP_TABLE_SIZE; i++) { 2151 for (i = 0; i < LEC_ARP_TABLE_SIZE; i++) {
2194 hlist_for_each_entry(entry, node, &priv->lec_arp_tables[i], next) { 2152 hlist_for_each_entry(entry, node,
2153 &priv->lec_arp_tables[i], next) {
2195 if (memcmp 2154 if (memcmp
2196 (ioc_data->atm_addr, entry->atm_addr, 2155 (ioc_data->atm_addr, entry->atm_addr,
2197 ATM_ESA_LEN) == 0) { 2156 ATM_ESA_LEN) == 0) {
2198 pr_debug("LEC_ARP: Attaching data direct\n"); 2157 pr_debug("LEC_ARP: Attaching data direct\n");
2199 pr_debug("Currently -> Vcc: %d, Rvcc:%d\n", 2158 pr_debug("Currently -> Vcc: %d, Rvcc:%d\n",
2200 entry->vcc ? entry->vcc->vci : 0, 2159 entry->vcc ? entry->vcc->vci : 0,
2201 entry->recv_vcc ? entry->recv_vcc-> 2160 entry->recv_vcc ? entry->recv_vcc->
2202 vci : 0); 2161 vci : 0);
2203 found_entry = 1; 2162 found_entry = 1;
2204 del_timer(&entry->timer); 2163 del_timer(&entry->timer);
2205 entry->vcc = vcc; 2164 entry->vcc = vcc;
@@ -2271,19 +2230,21 @@ static void lec_flush_complete(struct lec_priv *priv, unsigned long tran_id)
2271 struct lec_arp_table *entry; 2230 struct lec_arp_table *entry;
2272 int i; 2231 int i;
2273 2232
2274 pr_debug("LEC:lec_flush_complete %lx\n", tran_id); 2233 pr_debug("%lx\n", tran_id);
2275restart: 2234restart:
2276 spin_lock_irqsave(&priv->lec_arp_lock, flags); 2235 spin_lock_irqsave(&priv->lec_arp_lock, flags);
2277 for (i = 0; i < LEC_ARP_TABLE_SIZE; i++) { 2236 for (i = 0; i < LEC_ARP_TABLE_SIZE; i++) {
2278 hlist_for_each_entry(entry, node, &priv->lec_arp_tables[i], next) { 2237 hlist_for_each_entry(entry, node,
2279 if (entry->flush_tran_id == tran_id 2238 &priv->lec_arp_tables[i], next) {
2280 && entry->status == ESI_FLUSH_PENDING) { 2239 if (entry->flush_tran_id == tran_id &&
2240 entry->status == ESI_FLUSH_PENDING) {
2281 struct sk_buff *skb; 2241 struct sk_buff *skb;
2282 struct atm_vcc *vcc = entry->vcc; 2242 struct atm_vcc *vcc = entry->vcc;
2283 2243
2284 lec_arp_hold(entry); 2244 lec_arp_hold(entry);
2285 spin_unlock_irqrestore(&priv->lec_arp_lock, flags); 2245 spin_unlock_irqrestore(&priv->lec_arp_lock,
2286 while ((skb = skb_dequeue(&entry->tx_wait)) != NULL) 2246 flags);
2247 while ((skb = skb_dequeue(&entry->tx_wait)))
2287 lec_send(vcc, skb); 2248 lec_send(vcc, skb);
2288 entry->last_used = jiffies; 2249 entry->last_used = jiffies;
2289 entry->status = ESI_FORWARD_DIRECT; 2250 entry->status = ESI_FORWARD_DIRECT;
@@ -2308,11 +2269,12 @@ lec_set_flush_tran_id(struct lec_priv *priv,
2308 2269
2309 spin_lock_irqsave(&priv->lec_arp_lock, flags); 2270 spin_lock_irqsave(&priv->lec_arp_lock, flags);
2310 for (i = 0; i < LEC_ARP_TABLE_SIZE; i++) 2271 for (i = 0; i < LEC_ARP_TABLE_SIZE; i++)
2311 hlist_for_each_entry(entry, node, &priv->lec_arp_tables[i], next) { 2272 hlist_for_each_entry(entry, node,
2273 &priv->lec_arp_tables[i], next) {
2312 if (!memcmp(atm_addr, entry->atm_addr, ATM_ESA_LEN)) { 2274 if (!memcmp(atm_addr, entry->atm_addr, ATM_ESA_LEN)) {
2313 entry->flush_tran_id = tran_id; 2275 entry->flush_tran_id = tran_id;
2314 pr_debug("Set flush transaction id to %lx for %p\n", 2276 pr_debug("Set flush transaction id to %lx for %p\n",
2315 tran_id, entry); 2277 tran_id, entry);
2316 } 2278 }
2317 } 2279 }
2318 spin_unlock_irqrestore(&priv->lec_arp_lock, flags); 2280 spin_unlock_irqrestore(&priv->lec_arp_lock, flags);
@@ -2328,7 +2290,8 @@ static int lec_mcast_make(struct lec_priv *priv, struct atm_vcc *vcc)
2328 struct lec_vcc_priv *vpriv; 2290 struct lec_vcc_priv *vpriv;
2329 int err = 0; 2291 int err = 0;
2330 2292
2331 if (!(vpriv = kmalloc(sizeof(struct lec_vcc_priv), GFP_KERNEL))) 2293 vpriv = kmalloc(sizeof(struct lec_vcc_priv), GFP_KERNEL);
2294 if (!vpriv)
2332 return -ENOMEM; 2295 return -ENOMEM;
2333 vpriv->xoff = 0; 2296 vpriv->xoff = 0;
2334 vpriv->old_pop = vcc->pop; 2297 vpriv->old_pop = vcc->pop;
@@ -2368,18 +2331,19 @@ static void lec_vcc_close(struct lec_priv *priv, struct atm_vcc *vcc)
2368 spin_lock_irqsave(&priv->lec_arp_lock, flags); 2331 spin_lock_irqsave(&priv->lec_arp_lock, flags);
2369 2332
2370 for (i = 0; i < LEC_ARP_TABLE_SIZE; i++) { 2333 for (i = 0; i < LEC_ARP_TABLE_SIZE; i++) {
2371 hlist_for_each_entry_safe(entry, node, next, &priv->lec_arp_tables[i], next) { 2334 hlist_for_each_entry_safe(entry, node, next,
2335 &priv->lec_arp_tables[i], next) {
2372 if (vcc == entry->vcc) { 2336 if (vcc == entry->vcc) {
2373 lec_arp_remove(priv, entry); 2337 lec_arp_remove(priv, entry);
2374 lec_arp_put(entry); 2338 lec_arp_put(entry);
2375 if (priv->mcast_vcc == vcc) { 2339 if (priv->mcast_vcc == vcc)
2376 priv->mcast_vcc = NULL; 2340 priv->mcast_vcc = NULL;
2377 }
2378 } 2341 }
2379 } 2342 }
2380 } 2343 }
2381 2344
2382 hlist_for_each_entry_safe(entry, node, next, &priv->lec_arp_empty_ones, next) { 2345 hlist_for_each_entry_safe(entry, node, next,
2346 &priv->lec_arp_empty_ones, next) {
2383 if (entry->vcc == vcc) { 2347 if (entry->vcc == vcc) {
2384 lec_arp_clear_vccs(entry); 2348 lec_arp_clear_vccs(entry);
2385 del_timer(&entry->timer); 2349 del_timer(&entry->timer);
@@ -2388,7 +2352,8 @@ static void lec_vcc_close(struct lec_priv *priv, struct atm_vcc *vcc)
2388 } 2352 }
2389 } 2353 }
2390 2354
2391 hlist_for_each_entry_safe(entry, node, next, &priv->lec_no_forward, next) { 2355 hlist_for_each_entry_safe(entry, node, next,
2356 &priv->lec_no_forward, next) {
2392 if (entry->recv_vcc == vcc) { 2357 if (entry->recv_vcc == vcc) {
2393 lec_arp_clear_vccs(entry); 2358 lec_arp_clear_vccs(entry);
2394 del_timer(&entry->timer); 2359 del_timer(&entry->timer);
@@ -2429,14 +2394,16 @@ lec_arp_check_empties(struct lec_priv *priv,
2429 src = hdr->h_source; 2394 src = hdr->h_source;
2430 2395
2431 spin_lock_irqsave(&priv->lec_arp_lock, flags); 2396 spin_lock_irqsave(&priv->lec_arp_lock, flags);
2432 hlist_for_each_entry_safe(entry, node, next, &priv->lec_arp_empty_ones, next) { 2397 hlist_for_each_entry_safe(entry, node, next,
2398 &priv->lec_arp_empty_ones, next) {
2433 if (vcc == entry->vcc) { 2399 if (vcc == entry->vcc) {
2434 del_timer(&entry->timer); 2400 del_timer(&entry->timer);
2435 memcpy(entry->mac_addr, src, ETH_ALEN); 2401 memcpy(entry->mac_addr, src, ETH_ALEN);
2436 entry->status = ESI_FORWARD_DIRECT; 2402 entry->status = ESI_FORWARD_DIRECT;
2437 entry->last_used = jiffies; 2403 entry->last_used = jiffies;
2438 /* We might have got an entry */ 2404 /* We might have got an entry */
2439 if ((tmp = lec_arp_find(priv, src))) { 2405 tmp = lec_arp_find(priv, src);
2406 if (tmp) {
2440 lec_arp_remove(priv, tmp); 2407 lec_arp_remove(priv, tmp);
2441 lec_arp_put(tmp); 2408 lec_arp_put(tmp);
2442 } 2409 }
diff --git a/net/atm/mpc.c b/net/atm/mpc.c
index 38a6cb0863f..a6521c8aa88 100644
--- a/net/atm/mpc.c
+++ b/net/atm/mpc.c
@@ -1,3 +1,5 @@
1#define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__
2
1#include <linux/kernel.h> 3#include <linux/kernel.h>
2#include <linux/string.h> 4#include <linux/string.h>
3#include <linux/timer.h> 5#include <linux/timer.h>
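The pr_fmt definition added at the top of mpc.c is what lets the later printk conversions drop their hand-written "mpoa: function:" prefixes: every pr_info()/pr_debug() in the file is expanded through pr_fmt(), so the module name and __func__ are prepended automatically. Roughly (a simplified sketch of the standard kernel mechanism, not code from this patch):

	#define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__	/* as added above */
	#define pr_info(fmt, ...) printk(KERN_INFO pr_fmt(fmt), ##__VA_ARGS__)

	static void example(struct atm_vcc *vcc)
	{
		/*
		 * Expands to printk(KERN_INFO "mpoa" ":%s: " "vcc %p\n", __func__, vcc)
		 * (KBUILD_MODNAME is presumably "mpoa" for this file), printing
		 * something like "mpoa:example: vcc ffff880012345678".
		 */
		pr_info("vcc %p\n", vcc);
	}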
@@ -13,8 +15,8 @@
13#include <net/sock.h> 15#include <net/sock.h>
14#include <linux/skbuff.h> 16#include <linux/skbuff.h>
15#include <linux/ip.h> 17#include <linux/ip.h>
18#include <linux/uaccess.h>
16#include <asm/byteorder.h> 19#include <asm/byteorder.h>
17#include <asm/uaccess.h>
18#include <net/checksum.h> /* for ip_fast_csum() */ 20#include <net/checksum.h> /* for ip_fast_csum() */
19#include <net/arp.h> 21#include <net/arp.h>
20#include <net/dst.h> 22#include <net/dst.h>
@@ -36,31 +38,47 @@
36 */ 38 */
37 39
38#if 0 40#if 0
39#define dprintk printk /* debug */ 41#define dprintk(format, args...) \
42 printk(KERN_DEBUG "mpoa:%s: " format, __func__, ##args)
43#define dprintk_cont(format, args...) printk(KERN_CONT format, ##args)
40#else 44#else
41#define dprintk(format,args...) 45#define dprintk(format, args...) \
46 do { if (0) \
47 printk(KERN_DEBUG "mpoa:%s: " format, __func__, ##args);\
48 } while (0)
49#define dprintk_cont(format, args...) \
50 do { if (0) printk(KERN_CONT format, ##args); } while (0)
42#endif 51#endif
43 52
44#if 0 53#if 0
45#define ddprintk printk /* more debug */ 54#define ddprintk(format, args...) \
55 printk(KERN_DEBUG "mpoa:%s: " format, __func__, ##args)
56#define ddprintk_cont(format, args...) printk(KERN_CONT format, ##args)
46#else 57#else
47#define ddprintk(format,args...) 58#define ddprintk(format, args...) \
59 do { if (0) \
60 printk(KERN_DEBUG "mpoa:%s: " format, __func__, ##args);\
61 } while (0)
62#define ddprintk_cont(format, args...) \
63 do { if (0) printk(KERN_CONT format, ##args); } while (0)
48#endif 64#endif
49 65
50
51
52#define MPOA_TAG_LEN 4 66#define MPOA_TAG_LEN 4
53 67
54/* mpc_daemon -> kernel */ 68/* mpc_daemon -> kernel */
55static void MPOA_trigger_rcvd (struct k_message *msg, struct mpoa_client *mpc); 69static void MPOA_trigger_rcvd(struct k_message *msg, struct mpoa_client *mpc);
56static void MPOA_res_reply_rcvd(struct k_message *msg, struct mpoa_client *mpc); 70static void MPOA_res_reply_rcvd(struct k_message *msg, struct mpoa_client *mpc);
57static void ingress_purge_rcvd(struct k_message *msg, struct mpoa_client *mpc); 71static void ingress_purge_rcvd(struct k_message *msg, struct mpoa_client *mpc);
58static void egress_purge_rcvd(struct k_message *msg, struct mpoa_client *mpc); 72static void egress_purge_rcvd(struct k_message *msg, struct mpoa_client *mpc);
59static void mps_death(struct k_message *msg, struct mpoa_client *mpc); 73static void mps_death(struct k_message *msg, struct mpoa_client *mpc);
60static void clean_up(struct k_message *msg, struct mpoa_client *mpc, int action); 74static void clean_up(struct k_message *msg, struct mpoa_client *mpc,
61static void MPOA_cache_impos_rcvd(struct k_message *msg, struct mpoa_client *mpc); 75 int action);
62static void set_mpc_ctrl_addr_rcvd(struct k_message *mesg, struct mpoa_client *mpc); 76static void MPOA_cache_impos_rcvd(struct k_message *msg,
63static void set_mps_mac_addr_rcvd(struct k_message *mesg, struct mpoa_client *mpc); 77 struct mpoa_client *mpc);
78static void set_mpc_ctrl_addr_rcvd(struct k_message *mesg,
79 struct mpoa_client *mpc);
80static void set_mps_mac_addr_rcvd(struct k_message *mesg,
81 struct mpoa_client *mpc);
64 82
65static const uint8_t *copy_macs(struct mpoa_client *mpc, 83static const uint8_t *copy_macs(struct mpoa_client *mpc,
66 const uint8_t *router_mac, 84 const uint8_t *router_mac,
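
The rewritten dprintk()/ddprintk() macros use the usual disabled-debug idiom: the arguments stay visible to the compiler even when debugging is off, so printf-format mismatches are still caught and variables referenced only in debug calls do not trigger "unused" warnings, while the constant-false condition lets the dead call be optimised away; the do { } while (0) wrapper keeps the macro safe inside an unbraced if/else. A standalone sketch of the idiom in GNU C (names are illustrative):

    #include <stdio.h>

    #define DEBUG 0

    #define dprintk(fmt, args...) \
            do { \
                    if (DEBUG) \
                            printf("demo:%s: " fmt, __func__, ##args); \
            } while (0)

    int main(void)
    {
            int pkts = 3;

            if (pkts)
                    dprintk("saw %d packets\n", pkts);      /* expands to one full statement */
            else
                    dprintk("idle\n");

            return 0;
    }
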
@@ -74,10 +92,11 @@ static int msg_from_mpoad(struct atm_vcc *vcc, struct sk_buff *skb);
74 92
75static void mpc_push(struct atm_vcc *vcc, struct sk_buff *skb); 93static void mpc_push(struct atm_vcc *vcc, struct sk_buff *skb);
76static netdev_tx_t mpc_send_packet(struct sk_buff *skb, 94static netdev_tx_t mpc_send_packet(struct sk_buff *skb,
77 struct net_device *dev); 95 struct net_device *dev);
78static int mpoa_event_listener(struct notifier_block *mpoa_notifier, unsigned long event, void *dev); 96static int mpoa_event_listener(struct notifier_block *mpoa_notifier,
97 unsigned long event, void *dev);
79static void mpc_timer_refresh(void); 98static void mpc_timer_refresh(void);
80static void mpc_cache_check( unsigned long checking_time ); 99static void mpc_cache_check(unsigned long checking_time);
81 100
82static struct llc_snap_hdr llc_snap_mpoa_ctrl = { 101static struct llc_snap_hdr llc_snap_mpoa_ctrl = {
83 0xaa, 0xaa, 0x03, 102 0xaa, 0xaa, 0x03,
@@ -167,7 +186,7 @@ struct atm_mpoa_qos *atm_mpoa_add_qos(__be32 dst_ip, struct atm_qos *qos)
167 186
168 entry = kmalloc(sizeof(struct atm_mpoa_qos), GFP_KERNEL); 187 entry = kmalloc(sizeof(struct atm_mpoa_qos), GFP_KERNEL);
169 if (entry == NULL) { 188 if (entry == NULL) {
170 printk("mpoa: atm_mpoa_add_qos: out of memory\n"); 189 pr_info("mpoa: out of memory\n");
171 return entry; 190 return entry;
172 } 191 }
173 192
@@ -185,10 +204,9 @@ struct atm_mpoa_qos *atm_mpoa_search_qos(__be32 dst_ip)
185 struct atm_mpoa_qos *qos; 204 struct atm_mpoa_qos *qos;
186 205
187 qos = qos_head; 206 qos = qos_head;
188 while( qos != NULL ){ 207 while (qos) {
189 if(qos->ipaddr == dst_ip) { 208 if (qos->ipaddr == dst_ip)
190 break; 209 break;
191 }
192 qos = qos->next; 210 qos = qos->next;
193 } 211 }
194 212
@@ -200,10 +218,10 @@ struct atm_mpoa_qos *atm_mpoa_search_qos(__be32 dst_ip)
200 */ 218 */
201int atm_mpoa_delete_qos(struct atm_mpoa_qos *entry) 219int atm_mpoa_delete_qos(struct atm_mpoa_qos *entry)
202{ 220{
203
204 struct atm_mpoa_qos *curr; 221 struct atm_mpoa_qos *curr;
205 222
206 if (entry == NULL) return 0; 223 if (entry == NULL)
224 return 0;
207 if (entry == qos_head) { 225 if (entry == qos_head) {
208 qos_head = qos_head->next; 226 qos_head = qos_head->next;
209 kfree(entry); 227 kfree(entry);
@@ -234,9 +252,17 @@ void atm_mpoa_disp_qos(struct seq_file *m)
234 252
235 while (qos != NULL) { 253 while (qos != NULL) {
236 seq_printf(m, "%pI4\n %-7d %-7d %-7d %-7d %-7d\n %-7d %-7d %-7d %-7d %-7d\n", 254 seq_printf(m, "%pI4\n %-7d %-7d %-7d %-7d %-7d\n %-7d %-7d %-7d %-7d %-7d\n",
237 &qos->ipaddr, 255 &qos->ipaddr,
238 qos->qos.txtp.max_pcr, qos->qos.txtp.pcr, qos->qos.txtp.min_pcr, qos->qos.txtp.max_cdv, qos->qos.txtp.max_sdu, 256 qos->qos.txtp.max_pcr,
239 qos->qos.rxtp.max_pcr, qos->qos.rxtp.pcr, qos->qos.rxtp.min_pcr, qos->qos.rxtp.max_cdv, qos->qos.rxtp.max_sdu); 257 qos->qos.txtp.pcr,
258 qos->qos.txtp.min_pcr,
259 qos->qos.txtp.max_cdv,
260 qos->qos.txtp.max_sdu,
261 qos->qos.rxtp.max_pcr,
262 qos->qos.rxtp.pcr,
263 qos->qos.rxtp.min_pcr,
264 qos->qos.rxtp.max_cdv,
265 qos->qos.rxtp.max_sdu);
240 qos = qos->next; 266 qos = qos->next;
241 } 267 }
242} 268}
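
The seq_printf() reflow above keeps the %pI4 specifier, the printk extension that takes a pointer to a big-endian 32-bit address and renders it as a dotted quad. A minimal kernel-side sketch (function name is illustrative):

    #include <linux/printk.h>
    #include <linux/types.h>

    static void show_peer(__be32 addr)
    {
            /* %pI4 dereferences its argument, so pass the address, not the value */
            pr_info("peer %pI4\n", &addr);
    }
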
@@ -256,7 +282,7 @@ static struct mpoa_client *alloc_mpc(void)
256{ 282{
257 struct mpoa_client *mpc; 283 struct mpoa_client *mpc;
258 284
259 mpc = kzalloc(sizeof (struct mpoa_client), GFP_KERNEL); 285 mpc = kzalloc(sizeof(struct mpoa_client), GFP_KERNEL);
260 if (mpc == NULL) 286 if (mpc == NULL)
261 return NULL; 287 return NULL;
262 rwlock_init(&mpc->ingress_lock); 288 rwlock_init(&mpc->ingress_lock);
@@ -266,7 +292,7 @@ static struct mpoa_client *alloc_mpc(void)
266 292
267 mpc->parameters.mpc_p1 = MPC_P1; 293 mpc->parameters.mpc_p1 = MPC_P1;
268 mpc->parameters.mpc_p2 = MPC_P2; 294 mpc->parameters.mpc_p2 = MPC_P2;
269 memset(mpc->parameters.mpc_p3,0,sizeof(mpc->parameters.mpc_p3)); 295 memset(mpc->parameters.mpc_p3, 0, sizeof(mpc->parameters.mpc_p3));
270 mpc->parameters.mpc_p4 = MPC_P4; 296 mpc->parameters.mpc_p4 = MPC_P4;
271 mpc->parameters.mpc_p5 = MPC_P5; 297 mpc->parameters.mpc_p5 = MPC_P5;
272 mpc->parameters.mpc_p6 = MPC_P6; 298 mpc->parameters.mpc_p6 = MPC_P6;
@@ -286,9 +312,9 @@ static struct mpoa_client *alloc_mpc(void)
286static void start_mpc(struct mpoa_client *mpc, struct net_device *dev) 312static void start_mpc(struct mpoa_client *mpc, struct net_device *dev)
287{ 313{
288 314
289 dprintk("mpoa: (%s) start_mpc:\n", mpc->dev->name); 315 dprintk("(%s)\n", mpc->dev->name);
290 if (!dev->netdev_ops) 316 if (!dev->netdev_ops)
291 printk("mpoa: (%s) start_mpc not starting\n", dev->name); 317 pr_info("(%s) not starting\n", dev->name);
292 else { 318 else {
293 mpc->old_ops = dev->netdev_ops; 319 mpc->old_ops = dev->netdev_ops;
294 mpc->new_ops = *mpc->old_ops; 320 mpc->new_ops = *mpc->old_ops;
@@ -300,14 +326,14 @@ static void start_mpc(struct mpoa_client *mpc, struct net_device *dev)
300static void stop_mpc(struct mpoa_client *mpc) 326static void stop_mpc(struct mpoa_client *mpc)
301{ 327{
302 struct net_device *dev = mpc->dev; 328 struct net_device *dev = mpc->dev;
303 dprintk("mpoa: (%s) stop_mpc:", mpc->dev->name); 329 dprintk("(%s)", mpc->dev->name);
304 330
305 /* Lets not nullify lec device's dev->hard_start_xmit */ 331 /* Lets not nullify lec device's dev->hard_start_xmit */
306 if (dev->netdev_ops != &mpc->new_ops) { 332 if (dev->netdev_ops != &mpc->new_ops) {
307 dprintk(" mpc already stopped, not fatal\n"); 333 dprintk_cont(" mpc already stopped, not fatal\n");
308 return; 334 return;
309 } 335 }
310 dprintk("\n"); 336 dprintk_cont("\n");
311 337
312 dev->netdev_ops = mpc->old_ops; 338 dev->netdev_ops = mpc->old_ops;
313 mpc->old_ops = NULL; 339 mpc->old_ops = NULL;
@@ -319,25 +345,18 @@ static const char *mpoa_device_type_string(char type) __attribute__ ((unused));
319 345
320static const char *mpoa_device_type_string(char type) 346static const char *mpoa_device_type_string(char type)
321{ 347{
322 switch(type) { 348 switch (type) {
323 case NON_MPOA: 349 case NON_MPOA:
324 return "non-MPOA device"; 350 return "non-MPOA device";
325 break;
326 case MPS: 351 case MPS:
327 return "MPS"; 352 return "MPS";
328 break;
329 case MPC: 353 case MPC:
330 return "MPC"; 354 return "MPC";
331 break;
332 case MPS_AND_MPC: 355 case MPS_AND_MPC:
333 return "both MPS and MPC"; 356 return "both MPS and MPC";
334 break;
335 default:
336 return "unspecified (non-MPOA) device";
337 break;
338 } 357 }
339 358
340 return ""; /* not reached */ 359 return "unspecified (non-MPOA) device";
341} 360}
342 361
343/* 362/*
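
In mpoa_device_type_string() the patch drops the break statements that sat after unconditional returns and folds the default text into the single return below the switch. The shape it converges on, sketched with hypothetical names:

    static const char *state_name(int state)
    {
            switch (state) {
            case 0:
                    return "idle";          /* a return ends the case; no break needed */
            case 1:
                    return "resolving";
            case 2:
                    return "resolved";
            }

            return "unknown";               /* reached for every unlisted value */
    }
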
@@ -362,26 +381,28 @@ static void lane2_assoc_ind(struct net_device *dev, const u8 *mac_addr,
362 struct mpoa_client *mpc; 381 struct mpoa_client *mpc;
363 382
364 mpoa_device_type = number_of_mps_macs = 0; /* silence gcc */ 383 mpoa_device_type = number_of_mps_macs = 0; /* silence gcc */
365 dprintk("mpoa: (%s) lane2_assoc_ind: received TLV(s), ", dev->name); 384 dprintk("(%s) received TLV(s), ", dev->name);
366 dprintk("total length of all TLVs %d\n", sizeoftlvs); 385 dprintk("total length of all TLVs %d\n", sizeoftlvs);
367 mpc = find_mpc_by_lec(dev); /* Sampo-Fix: moved here from below */ 386 mpc = find_mpc_by_lec(dev); /* Sampo-Fix: moved here from below */
368 if (mpc == NULL) { 387 if (mpc == NULL) {
369 printk("mpoa: (%s) lane2_assoc_ind: no mpc\n", dev->name); 388 pr_info("(%s) no mpc\n", dev->name);
370 return; 389 return;
371 } 390 }
372 end_of_tlvs = tlvs + sizeoftlvs; 391 end_of_tlvs = tlvs + sizeoftlvs;
373 while (end_of_tlvs - tlvs >= 5) { 392 while (end_of_tlvs - tlvs >= 5) {
374 type = (tlvs[0] << 24) | (tlvs[1] << 16) | (tlvs[2] << 8) | tlvs[3]; 393 type = ((tlvs[0] << 24) | (tlvs[1] << 16) |
394 (tlvs[2] << 8) | tlvs[3]);
375 length = tlvs[4]; 395 length = tlvs[4];
376 tlvs += 5; 396 tlvs += 5;
377 dprintk(" type 0x%x length %02x\n", type, length); 397 dprintk(" type 0x%x length %02x\n", type, length);
378 if (tlvs + length > end_of_tlvs) { 398 if (tlvs + length > end_of_tlvs) {
379 printk("TLV value extends past its buffer, aborting parse\n"); 399 pr_info("TLV value extends past its buffer, aborting parse\n");
380 return; 400 return;
381 } 401 }
382 402
383 if (type == 0) { 403 if (type == 0) {
384 printk("mpoa: (%s) lane2_assoc_ind: TLV type was 0, returning\n", dev->name); 404 pr_info("mpoa: (%s) TLV type was 0, returning\n",
405 dev->name);
385 return; 406 return;
386 } 407 }
387 408
@@ -391,39 +412,48 @@ static void lane2_assoc_ind(struct net_device *dev, const u8 *mac_addr,
391 } 412 }
392 mpoa_device_type = *tlvs++; 413 mpoa_device_type = *tlvs++;
393 number_of_mps_macs = *tlvs++; 414 number_of_mps_macs = *tlvs++;
394 dprintk("mpoa: (%s) MPOA device type '%s', ", dev->name, mpoa_device_type_string(mpoa_device_type)); 415 dprintk("(%s) MPOA device type '%s', ",
416 dev->name, mpoa_device_type_string(mpoa_device_type));
395 if (mpoa_device_type == MPS_AND_MPC && 417 if (mpoa_device_type == MPS_AND_MPC &&
396 length < (42 + number_of_mps_macs*ETH_ALEN)) { /* :) */ 418 length < (42 + number_of_mps_macs*ETH_ALEN)) { /* :) */
397 printk("\nmpoa: (%s) lane2_assoc_ind: short MPOA Device Type TLV\n", 419 pr_info("(%s) short MPOA Device Type TLV\n",
398 dev->name); 420 dev->name);
399 continue; 421 continue;
400 } 422 }
401 if ((mpoa_device_type == MPS || mpoa_device_type == MPC) 423 if ((mpoa_device_type == MPS || mpoa_device_type == MPC) &&
402 && length < 22 + number_of_mps_macs*ETH_ALEN) { 424 length < 22 + number_of_mps_macs*ETH_ALEN) {
403 printk("\nmpoa: (%s) lane2_assoc_ind: short MPOA Device Type TLV\n", 425 pr_info("(%s) short MPOA Device Type TLV\n", dev->name);
404 dev->name);
405 continue; 426 continue;
406 } 427 }
407 if (mpoa_device_type != MPS && mpoa_device_type != MPS_AND_MPC) { 428 if (mpoa_device_type != MPS &&
408 dprintk("ignoring non-MPS device\n"); 429 mpoa_device_type != MPS_AND_MPC) {
409 if (mpoa_device_type == MPC) tlvs += 20; 430 dprintk("ignoring non-MPS device ");
431 if (mpoa_device_type == MPC)
432 tlvs += 20;
410 continue; /* we are only interested in MPSs */ 433 continue; /* we are only interested in MPSs */
411 } 434 }
412 if (number_of_mps_macs == 0 && mpoa_device_type == MPS_AND_MPC) { 435 if (number_of_mps_macs == 0 &&
413 printk("\nmpoa: (%s) lane2_assoc_ind: MPS_AND_MPC has zero MACs\n", dev->name); 436 mpoa_device_type == MPS_AND_MPC) {
437 pr_info("(%s) MPS_AND_MPC has zero MACs\n", dev->name);
414 continue; /* someone should read the spec */ 438 continue; /* someone should read the spec */
415 } 439 }
416 dprintk("this MPS has %d MAC addresses\n", number_of_mps_macs); 440 dprintk_cont("this MPS has %d MAC addresses\n",
441 number_of_mps_macs);
417 442
418 /* ok, now we can go and tell our daemon the control address of MPS */ 443 /*
444 * ok, now we can go and tell our daemon
445 * the control address of MPS
446 */
419 send_set_mps_ctrl_addr(tlvs, mpc); 447 send_set_mps_ctrl_addr(tlvs, mpc);
420 448
421 tlvs = copy_macs(mpc, mac_addr, tlvs, number_of_mps_macs, mpoa_device_type); 449 tlvs = copy_macs(mpc, mac_addr, tlvs,
422 if (tlvs == NULL) return; 450 number_of_mps_macs, mpoa_device_type);
451 if (tlvs == NULL)
452 return;
423 } 453 }
424 if (end_of_tlvs - tlvs != 0) 454 if (end_of_tlvs - tlvs != 0)
425 printk("mpoa: (%s) lane2_assoc_ind: ignoring %Zd bytes of trailing TLV carbage\n", 455 pr_info("(%s) ignoring %Zd bytes of trailing TLV garbage\n",
426 dev->name, end_of_tlvs - tlvs); 456 dev->name, end_of_tlvs - tlvs);
427 return; 457 return;
428} 458}
429 459
@@ -441,11 +471,12 @@ static const uint8_t *copy_macs(struct mpoa_client *mpc,
441 num_macs = (mps_macs > 1) ? mps_macs : 1; 471 num_macs = (mps_macs > 1) ? mps_macs : 1;
442 472
443 if (mpc->number_of_mps_macs != num_macs) { /* need to reallocate? */ 473 if (mpc->number_of_mps_macs != num_macs) { /* need to reallocate? */
444 if (mpc->number_of_mps_macs != 0) kfree(mpc->mps_macs); 474 if (mpc->number_of_mps_macs != 0)
475 kfree(mpc->mps_macs);
445 mpc->number_of_mps_macs = 0; 476 mpc->number_of_mps_macs = 0;
446 mpc->mps_macs = kmalloc(num_macs*ETH_ALEN, GFP_KERNEL); 477 mpc->mps_macs = kmalloc(num_macs * ETH_ALEN, GFP_KERNEL);
447 if (mpc->mps_macs == NULL) { 478 if (mpc->mps_macs == NULL) {
448 printk("mpoa: (%s) copy_macs: out of mem\n", mpc->dev->name); 479 pr_info("(%s) out of mem\n", mpc->dev->name);
449 return NULL; 480 return NULL;
450 } 481 }
451 } 482 }
@@ -478,24 +509,30 @@ static int send_via_shortcut(struct sk_buff *skb, struct mpoa_client *mpc)
478 iph = (struct iphdr *)buff; 509 iph = (struct iphdr *)buff;
479 ipaddr = iph->daddr; 510 ipaddr = iph->daddr;
480 511
481 ddprintk("mpoa: (%s) send_via_shortcut: ipaddr 0x%x\n", mpc->dev->name, ipaddr); 512 ddprintk("(%s) ipaddr 0x%x\n",
513 mpc->dev->name, ipaddr);
482 514
483 entry = mpc->in_ops->get(ipaddr, mpc); 515 entry = mpc->in_ops->get(ipaddr, mpc);
484 if (entry == NULL) { 516 if (entry == NULL) {
485 entry = mpc->in_ops->add_entry(ipaddr, mpc); 517 entry = mpc->in_ops->add_entry(ipaddr, mpc);
486 if (entry != NULL) mpc->in_ops->put(entry); 518 if (entry != NULL)
519 mpc->in_ops->put(entry);
487 return 1; 520 return 1;
488 } 521 }
489 if (mpc->in_ops->cache_hit(entry, mpc) != OPEN){ /* threshold not exceeded or VCC not ready */ 522 /* threshold not exceeded or VCC not ready */
490 ddprintk("mpoa: (%s) send_via_shortcut: cache_hit: returns != OPEN\n", mpc->dev->name); 523 if (mpc->in_ops->cache_hit(entry, mpc) != OPEN) {
524 ddprintk("(%s) cache_hit: returns != OPEN\n",
525 mpc->dev->name);
491 mpc->in_ops->put(entry); 526 mpc->in_ops->put(entry);
492 return 1; 527 return 1;
493 } 528 }
494 529
495 ddprintk("mpoa: (%s) send_via_shortcut: using shortcut\n", mpc->dev->name); 530 ddprintk("(%s) using shortcut\n",
531 mpc->dev->name);
496 /* MPOA spec A.1.4, MPOA client must decrement IP ttl at least by one */ 532 /* MPOA spec A.1.4, MPOA client must decrement IP ttl at least by one */
497 if (iph->ttl <= 1) { 533 if (iph->ttl <= 1) {
498 ddprintk("mpoa: (%s) send_via_shortcut: IP ttl = %u, using LANE\n", mpc->dev->name, iph->ttl); 534 ddprintk("(%s) IP ttl = %u, using LANE\n",
535 mpc->dev->name, iph->ttl);
499 mpc->in_ops->put(entry); 536 mpc->in_ops->put(entry);
500 return 1; 537 return 1;
501 } 538 }
@@ -504,15 +541,18 @@ static int send_via_shortcut(struct sk_buff *skb, struct mpoa_client *mpc)
504 iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl); 541 iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);
505 542
506 if (entry->ctrl_info.tag != 0) { 543 if (entry->ctrl_info.tag != 0) {
507 ddprintk("mpoa: (%s) send_via_shortcut: adding tag 0x%x\n", mpc->dev->name, entry->ctrl_info.tag); 544 ddprintk("(%s) adding tag 0x%x\n",
545 mpc->dev->name, entry->ctrl_info.tag);
508 tagged_llc_snap_hdr.tag = entry->ctrl_info.tag; 546 tagged_llc_snap_hdr.tag = entry->ctrl_info.tag;
509 skb_pull(skb, ETH_HLEN); /* get rid of Eth header */ 547 skb_pull(skb, ETH_HLEN); /* get rid of Eth header */
510 skb_push(skb, sizeof(tagged_llc_snap_hdr)); /* add LLC/SNAP header */ 548 skb_push(skb, sizeof(tagged_llc_snap_hdr));
549 /* add LLC/SNAP header */
511 skb_copy_to_linear_data(skb, &tagged_llc_snap_hdr, 550 skb_copy_to_linear_data(skb, &tagged_llc_snap_hdr,
512 sizeof(tagged_llc_snap_hdr)); 551 sizeof(tagged_llc_snap_hdr));
513 } else { 552 } else {
514 skb_pull(skb, ETH_HLEN); /* get rid of Eth header */ 553 skb_pull(skb, ETH_HLEN); /* get rid of Eth header */
515 skb_push(skb, sizeof(struct llc_snap_hdr)); /* add LLC/SNAP header + tag */ 554 skb_push(skb, sizeof(struct llc_snap_hdr));
555 /* add LLC/SNAP header + tag */
516 skb_copy_to_linear_data(skb, &llc_snap_mpoa_data, 556 skb_copy_to_linear_data(skb, &llc_snap_mpoa_data,
517 sizeof(struct llc_snap_hdr)); 557 sizeof(struct llc_snap_hdr));
518 } 558 }
@@ -537,8 +577,8 @@ static netdev_tx_t mpc_send_packet(struct sk_buff *skb,
537 int i = 0; 577 int i = 0;
538 578
539 mpc = find_mpc_by_lec(dev); /* this should NEVER fail */ 579 mpc = find_mpc_by_lec(dev); /* this should NEVER fail */
540 if(mpc == NULL) { 580 if (mpc == NULL) {
541 printk("mpoa: (%s) mpc_send_packet: no MPC found\n", dev->name); 581 pr_info("(%s) no MPC found\n", dev->name);
542 goto non_ip; 582 goto non_ip;
543 } 583 }
544 584
@@ -554,14 +594,15 @@ static netdev_tx_t mpc_send_packet(struct sk_buff *skb,
554 goto non_ip; 594 goto non_ip;
555 595
556 while (i < mpc->number_of_mps_macs) { 596 while (i < mpc->number_of_mps_macs) {
557 if (!compare_ether_addr(eth->h_dest, (mpc->mps_macs + i*ETH_ALEN))) 597 if (!compare_ether_addr(eth->h_dest,
558 if ( send_via_shortcut(skb, mpc) == 0 ) /* try shortcut */ 598 (mpc->mps_macs + i*ETH_ALEN)))
559 return NETDEV_TX_OK; /* success! */ 599 if (send_via_shortcut(skb, mpc) == 0) /* try shortcut */
600 return NETDEV_TX_OK;
560 i++; 601 i++;
561 } 602 }
562 603
563 non_ip: 604non_ip:
564 return mpc->old_ops->ndo_start_xmit(skb,dev); 605 return mpc->old_ops->ndo_start_xmit(skb, dev);
565} 606}
566 607
567static int atm_mpoa_vcc_attach(struct atm_vcc *vcc, void __user *arg) 608static int atm_mpoa_vcc_attach(struct atm_vcc *vcc, void __user *arg)
@@ -574,7 +615,8 @@ static int atm_mpoa_vcc_attach(struct atm_vcc *vcc, void __user *arg)
574 615
575 bytes_left = copy_from_user(&ioc_data, arg, sizeof(struct atmmpc_ioc)); 616 bytes_left = copy_from_user(&ioc_data, arg, sizeof(struct atmmpc_ioc));
576 if (bytes_left != 0) { 617 if (bytes_left != 0) {
577 printk("mpoa: mpc_vcc_attach: Short read (missed %d bytes) from userland\n", bytes_left); 618 pr_info("mpoa:Short read (missed %d bytes) from userland\n",
619 bytes_left);
578 return -EFAULT; 620 return -EFAULT;
579 } 621 }
580 ipaddr = ioc_data.ipaddr; 622 ipaddr = ioc_data.ipaddr;
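
The hunk above wraps the error message for a short copy_from_user(). The helper returns the number of bytes it could not copy, so any nonzero result means the user buffer was unreadable and the conventional response is -EFAULT. A minimal sketch with a made-up ioctl payload:

    #include <linux/uaccess.h>

    struct demo_ioc {
            int itf;
    };

    static long demo_ioctl(void __user *arg)
    {
            struct demo_ioc ioc;

            if (copy_from_user(&ioc, arg, sizeof(ioc)))
                    return -EFAULT;         /* nonzero = bytes left uncopied */

            return ioc.itf;
    }
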
@@ -587,18 +629,20 @@ static int atm_mpoa_vcc_attach(struct atm_vcc *vcc, void __user *arg)
587 629
588 if (ioc_data.type == MPC_SOCKET_INGRESS) { 630 if (ioc_data.type == MPC_SOCKET_INGRESS) {
589 in_entry = mpc->in_ops->get(ipaddr, mpc); 631 in_entry = mpc->in_ops->get(ipaddr, mpc);
590 if (in_entry == NULL || in_entry->entry_state < INGRESS_RESOLVED) { 632 if (in_entry == NULL ||
591 printk("mpoa: (%s) mpc_vcc_attach: did not find RESOLVED entry from ingress cache\n", 633 in_entry->entry_state < INGRESS_RESOLVED) {
634 pr_info("(%s) did not find RESOLVED entry from ingress cache\n",
592 mpc->dev->name); 635 mpc->dev->name);
593 if (in_entry != NULL) mpc->in_ops->put(in_entry); 636 if (in_entry != NULL)
637 mpc->in_ops->put(in_entry);
594 return -EINVAL; 638 return -EINVAL;
595 } 639 }
596 printk("mpoa: (%s) mpc_vcc_attach: attaching ingress SVC, entry = %pI4\n", 640 pr_info("(%s) attaching ingress SVC, entry = %pI4\n",
597 mpc->dev->name, &in_entry->ctrl_info.in_dst_ip); 641 mpc->dev->name, &in_entry->ctrl_info.in_dst_ip);
598 in_entry->shortcut = vcc; 642 in_entry->shortcut = vcc;
599 mpc->in_ops->put(in_entry); 643 mpc->in_ops->put(in_entry);
600 } else { 644 } else {
601 printk("mpoa: (%s) mpc_vcc_attach: attaching egress SVC\n", mpc->dev->name); 645 pr_info("(%s) attaching egress SVC\n", mpc->dev->name);
602 } 646 }
603 647
604 vcc->proto_data = mpc->dev; 648 vcc->proto_data = mpc->dev;
@@ -618,27 +662,27 @@ static void mpc_vcc_close(struct atm_vcc *vcc, struct net_device *dev)
618 662
619 mpc = find_mpc_by_lec(dev); 663 mpc = find_mpc_by_lec(dev);
620 if (mpc == NULL) { 664 if (mpc == NULL) {
621 printk("mpoa: (%s) mpc_vcc_close: close for unknown MPC\n", dev->name); 665 pr_info("(%s) close for unknown MPC\n", dev->name);
622 return; 666 return;
623 } 667 }
624 668
625 dprintk("mpoa: (%s) mpc_vcc_close:\n", dev->name); 669 dprintk("(%s)\n", dev->name);
626 in_entry = mpc->in_ops->get_by_vcc(vcc, mpc); 670 in_entry = mpc->in_ops->get_by_vcc(vcc, mpc);
627 if (in_entry) { 671 if (in_entry) {
628 dprintk("mpoa: (%s) mpc_vcc_close: ingress SVC closed ip = %pI4\n", 672 dprintk("(%s) ingress SVC closed ip = %pI4\n",
629 mpc->dev->name, &in_entry->ctrl_info.in_dst_ip); 673 mpc->dev->name, &in_entry->ctrl_info.in_dst_ip);
630 in_entry->shortcut = NULL; 674 in_entry->shortcut = NULL;
631 mpc->in_ops->put(in_entry); 675 mpc->in_ops->put(in_entry);
632 } 676 }
633 eg_entry = mpc->eg_ops->get_by_vcc(vcc, mpc); 677 eg_entry = mpc->eg_ops->get_by_vcc(vcc, mpc);
634 if (eg_entry) { 678 if (eg_entry) {
635 dprintk("mpoa: (%s) mpc_vcc_close: egress SVC closed\n", mpc->dev->name); 679 dprintk("(%s) egress SVC closed\n", mpc->dev->name);
636 eg_entry->shortcut = NULL; 680 eg_entry->shortcut = NULL;
637 mpc->eg_ops->put(eg_entry); 681 mpc->eg_ops->put(eg_entry);
638 } 682 }
639 683
640 if (in_entry == NULL && eg_entry == NULL) 684 if (in_entry == NULL && eg_entry == NULL)
641 dprintk("mpoa: (%s) mpc_vcc_close: unused vcc closed\n", dev->name); 685 dprintk("(%s) unused vcc closed\n", dev->name);
642 686
643 return; 687 return;
644} 688}
@@ -652,18 +696,19 @@ static void mpc_push(struct atm_vcc *vcc, struct sk_buff *skb)
652 __be32 tag; 696 __be32 tag;
653 char *tmp; 697 char *tmp;
654 698
655 ddprintk("mpoa: (%s) mpc_push:\n", dev->name); 699 ddprintk("(%s)\n", dev->name);
656 if (skb == NULL) { 700 if (skb == NULL) {
657 dprintk("mpoa: (%s) mpc_push: null skb, closing VCC\n", dev->name); 701 dprintk("(%s) null skb, closing VCC\n", dev->name);
658 mpc_vcc_close(vcc, dev); 702 mpc_vcc_close(vcc, dev);
659 return; 703 return;
660 } 704 }
661 705
662 skb->dev = dev; 706 skb->dev = dev;
663 if (memcmp(skb->data, &llc_snap_mpoa_ctrl, sizeof(struct llc_snap_hdr)) == 0) { 707 if (memcmp(skb->data, &llc_snap_mpoa_ctrl,
708 sizeof(struct llc_snap_hdr)) == 0) {
664 struct sock *sk = sk_atm(vcc); 709 struct sock *sk = sk_atm(vcc);
665 710
666 dprintk("mpoa: (%s) mpc_push: control packet arrived\n", dev->name); 711 dprintk("(%s) control packet arrived\n", dev->name);
667 /* Pass control packets to daemon */ 712 /* Pass control packets to daemon */
668 skb_queue_tail(&sk->sk_receive_queue, skb); 713 skb_queue_tail(&sk->sk_receive_queue, skb);
669 sk->sk_data_ready(sk, skb->len); 714 sk->sk_data_ready(sk, skb->len);
@@ -675,20 +720,22 @@ static void mpc_push(struct atm_vcc *vcc, struct sk_buff *skb)
675 720
676 mpc = find_mpc_by_lec(dev); 721 mpc = find_mpc_by_lec(dev);
677 if (mpc == NULL) { 722 if (mpc == NULL) {
678 printk("mpoa: (%s) mpc_push: unknown MPC\n", dev->name); 723 pr_info("(%s) unknown MPC\n", dev->name);
679 return; 724 return;
680 } 725 }
681 726
682 if (memcmp(skb->data, &llc_snap_mpoa_data_tagged, sizeof(struct llc_snap_hdr)) == 0) { /* MPOA tagged data */ 727 if (memcmp(skb->data, &llc_snap_mpoa_data_tagged,
683 ddprintk("mpoa: (%s) mpc_push: tagged data packet arrived\n", dev->name); 728 sizeof(struct llc_snap_hdr)) == 0) { /* MPOA tagged data */
729 ddprintk("(%s) tagged data packet arrived\n", dev->name);
684 730
685 } else if (memcmp(skb->data, &llc_snap_mpoa_data, sizeof(struct llc_snap_hdr)) == 0) { /* MPOA data */ 731 } else if (memcmp(skb->data, &llc_snap_mpoa_data,
686 printk("mpoa: (%s) mpc_push: non-tagged data packet arrived\n", dev->name); 732 sizeof(struct llc_snap_hdr)) == 0) { /* MPOA data */
687 printk(" mpc_push: non-tagged data unsupported, purging\n"); 733 pr_info("(%s) Unsupported non-tagged data packet arrived. Purging\n",
734 dev->name);
688 dev_kfree_skb_any(skb); 735 dev_kfree_skb_any(skb);
689 return; 736 return;
690 } else { 737 } else {
691 printk("mpoa: (%s) mpc_push: garbage arrived, purging\n", dev->name); 738 pr_info("(%s) garbage arrived, purging\n", dev->name);
692 dev_kfree_skb_any(skb); 739 dev_kfree_skb_any(skb);
693 return; 740 return;
694 } 741 }
@@ -698,8 +745,8 @@ static void mpc_push(struct atm_vcc *vcc, struct sk_buff *skb)
698 745
699 eg = mpc->eg_ops->get_by_tag(tag, mpc); 746 eg = mpc->eg_ops->get_by_tag(tag, mpc);
700 if (eg == NULL) { 747 if (eg == NULL) {
701 printk("mpoa: (%s) mpc_push: Didn't find egress cache entry, tag = %u\n", 748 pr_info("mpoa: (%s) Didn't find egress cache entry, tag = %u\n",
702 dev->name,tag); 749 dev->name, tag);
703 purge_egress_shortcut(vcc, NULL); 750 purge_egress_shortcut(vcc, NULL);
704 dev_kfree_skb_any(skb); 751 dev_kfree_skb_any(skb);
705 return; 752 return;
@@ -711,13 +758,15 @@ static void mpc_push(struct atm_vcc *vcc, struct sk_buff *skb)
711 */ 758 */
712 if (eg->shortcut == NULL) { 759 if (eg->shortcut == NULL) {
713 eg->shortcut = vcc; 760 eg->shortcut = vcc;
714 printk("mpoa: (%s) mpc_push: egress SVC in use\n", dev->name); 761 pr_info("(%s) egress SVC in use\n", dev->name);
715 } 762 }
716 763
717 skb_pull(skb, sizeof(struct llc_snap_hdr) + sizeof(tag)); /* get rid of LLC/SNAP header */ 764 skb_pull(skb, sizeof(struct llc_snap_hdr) + sizeof(tag));
718 new_skb = skb_realloc_headroom(skb, eg->ctrl_info.DH_length); /* LLC/SNAP is shorter than MAC header :( */ 765 /* get rid of LLC/SNAP header */
766 new_skb = skb_realloc_headroom(skb, eg->ctrl_info.DH_length);
767 /* LLC/SNAP is shorter than MAC header :( */
719 dev_kfree_skb_any(skb); 768 dev_kfree_skb_any(skb);
720 if (new_skb == NULL){ 769 if (new_skb == NULL) {
721 mpc->eg_ops->put(eg); 770 mpc->eg_ops->put(eg);
722 return; 771 return;
723 } 772 }
@@ -750,7 +799,7 @@ static struct atm_dev mpc_dev = {
750 /* members not explicitly initialised will be 0 */ 799 /* members not explicitly initialised will be 0 */
751}; 800};
752 801
753static int atm_mpoa_mpoad_attach (struct atm_vcc *vcc, int arg) 802static int atm_mpoa_mpoad_attach(struct atm_vcc *vcc, int arg)
754{ 803{
755 struct mpoa_client *mpc; 804 struct mpoa_client *mpc;
756 struct lec_priv *priv; 805 struct lec_priv *priv;
@@ -770,15 +819,16 @@ static int atm_mpoa_mpoad_attach (struct atm_vcc *vcc, int arg)
770 819
771 mpc = find_mpc_by_itfnum(arg); 820 mpc = find_mpc_by_itfnum(arg);
772 if (mpc == NULL) { 821 if (mpc == NULL) {
773 dprintk("mpoa: mpoad_attach: allocating new mpc for itf %d\n", arg); 822 dprintk("allocating new mpc for itf %d\n", arg);
774 mpc = alloc_mpc(); 823 mpc = alloc_mpc();
775 if (mpc == NULL) 824 if (mpc == NULL)
776 return -ENOMEM; 825 return -ENOMEM;
777 mpc->dev_num = arg; 826 mpc->dev_num = arg;
778 mpc->dev = find_lec_by_itfnum(arg); /* NULL if there was no lec */ 827 mpc->dev = find_lec_by_itfnum(arg);
828 /* NULL if there was no lec */
779 } 829 }
780 if (mpc->mpoad_vcc) { 830 if (mpc->mpoad_vcc) {
781 printk("mpoa: mpoad_attach: mpoad is already present for itf %d\n", arg); 831 pr_info("mpoad is already present for itf %d\n", arg);
782 return -EADDRINUSE; 832 return -EADDRINUSE;
783 } 833 }
784 834
@@ -794,8 +844,8 @@ static int atm_mpoa_mpoad_attach (struct atm_vcc *vcc, int arg)
794 mpc->mpoad_vcc = vcc; 844 mpc->mpoad_vcc = vcc;
795 vcc->dev = &mpc_dev; 845 vcc->dev = &mpc_dev;
796 vcc_insert_socket(sk_atm(vcc)); 846 vcc_insert_socket(sk_atm(vcc));
797 set_bit(ATM_VF_META,&vcc->flags); 847 set_bit(ATM_VF_META, &vcc->flags);
798 set_bit(ATM_VF_READY,&vcc->flags); 848 set_bit(ATM_VF_READY, &vcc->flags);
799 849
800 if (mpc->dev) { 850 if (mpc->dev) {
801 char empty[ATM_ESA_LEN]; 851 char empty[ATM_ESA_LEN];
@@ -805,7 +855,7 @@ static int atm_mpoa_mpoad_attach (struct atm_vcc *vcc, int arg)
805 /* set address if mpcd e.g. gets killed and restarted. 855 /* set address if mpcd e.g. gets killed and restarted.
806 * If we do not do it now we have to wait for the next LE_ARP 856 * If we do not do it now we have to wait for the next LE_ARP
807 */ 857 */
808 if ( memcmp(mpc->mps_ctrl_addr, empty, ATM_ESA_LEN) != 0 ) 858 if (memcmp(mpc->mps_ctrl_addr, empty, ATM_ESA_LEN) != 0)
809 send_set_mps_ctrl_addr(mpc->mps_ctrl_addr, mpc); 859 send_set_mps_ctrl_addr(mpc->mps_ctrl_addr, mpc);
810 } 860 }
811 861
@@ -817,7 +867,7 @@ static void send_set_mps_ctrl_addr(const char *addr, struct mpoa_client *mpc)
817{ 867{
818 struct k_message mesg; 868 struct k_message mesg;
819 869
820 memcpy (mpc->mps_ctrl_addr, addr, ATM_ESA_LEN); 870 memcpy(mpc->mps_ctrl_addr, addr, ATM_ESA_LEN);
821 871
822 mesg.type = SET_MPS_CTRL_ADDR; 872 mesg.type = SET_MPS_CTRL_ADDR;
823 memcpy(mesg.MPS_ctrl, addr, ATM_ESA_LEN); 873 memcpy(mesg.MPS_ctrl, addr, ATM_ESA_LEN);
@@ -833,11 +883,11 @@ static void mpoad_close(struct atm_vcc *vcc)
833 883
834 mpc = find_mpc_by_vcc(vcc); 884 mpc = find_mpc_by_vcc(vcc);
835 if (mpc == NULL) { 885 if (mpc == NULL) {
836 printk("mpoa: mpoad_close: did not find MPC\n"); 886 pr_info("did not find MPC\n");
837 return; 887 return;
838 } 888 }
839 if (!mpc->mpoad_vcc) { 889 if (!mpc->mpoad_vcc) {
840 printk("mpoa: mpoad_close: close for non-present mpoad\n"); 890 pr_info("close for non-present mpoad\n");
841 return; 891 return;
842 } 892 }
843 893
@@ -857,7 +907,7 @@ static void mpoad_close(struct atm_vcc *vcc)
857 kfree_skb(skb); 907 kfree_skb(skb);
858 } 908 }
859 909
860 printk("mpoa: (%s) going down\n", 910 pr_info("(%s) going down\n",
861 (mpc->dev) ? mpc->dev->name : "<unknown>"); 911 (mpc->dev) ? mpc->dev->name : "<unknown>");
862 module_put(THIS_MODULE); 912 module_put(THIS_MODULE);
863 913
@@ -871,61 +921,61 @@ static int msg_from_mpoad(struct atm_vcc *vcc, struct sk_buff *skb)
871{ 921{
872 922
873 struct mpoa_client *mpc = find_mpc_by_vcc(vcc); 923 struct mpoa_client *mpc = find_mpc_by_vcc(vcc);
874 struct k_message *mesg = (struct k_message*)skb->data; 924 struct k_message *mesg = (struct k_message *)skb->data;
875 atomic_sub(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc); 925 atomic_sub(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc);
876 926
877 if (mpc == NULL) { 927 if (mpc == NULL) {
878 printk("mpoa: msg_from_mpoad: no mpc found\n"); 928 pr_info("no mpc found\n");
879 return 0; 929 return 0;
880 } 930 }
881 dprintk("mpoa: (%s) msg_from_mpoad:", (mpc->dev) ? mpc->dev->name : "<unknown>"); 931 dprintk("(%s)", mpc->dev ? mpc->dev->name : "<unknown>");
882 switch(mesg->type) { 932 switch (mesg->type) {
883 case MPOA_RES_REPLY_RCVD: 933 case MPOA_RES_REPLY_RCVD:
884 dprintk(" mpoa_res_reply_rcvd\n"); 934 dprintk_cont("mpoa_res_reply_rcvd\n");
885 MPOA_res_reply_rcvd(mesg, mpc); 935 MPOA_res_reply_rcvd(mesg, mpc);
886 break; 936 break;
887 case MPOA_TRIGGER_RCVD: 937 case MPOA_TRIGGER_RCVD:
888 dprintk(" mpoa_trigger_rcvd\n"); 938 dprintk_cont("mpoa_trigger_rcvd\n");
889 MPOA_trigger_rcvd(mesg, mpc); 939 MPOA_trigger_rcvd(mesg, mpc);
890 break; 940 break;
891 case INGRESS_PURGE_RCVD: 941 case INGRESS_PURGE_RCVD:
892 dprintk(" nhrp_purge_rcvd\n"); 942 dprintk_cont("nhrp_purge_rcvd\n");
893 ingress_purge_rcvd(mesg, mpc); 943 ingress_purge_rcvd(mesg, mpc);
894 break; 944 break;
895 case EGRESS_PURGE_RCVD: 945 case EGRESS_PURGE_RCVD:
896 dprintk(" egress_purge_reply_rcvd\n"); 946 dprintk_cont("egress_purge_reply_rcvd\n");
897 egress_purge_rcvd(mesg, mpc); 947 egress_purge_rcvd(mesg, mpc);
898 break; 948 break;
899 case MPS_DEATH: 949 case MPS_DEATH:
900 dprintk(" mps_death\n"); 950 dprintk_cont("mps_death\n");
901 mps_death(mesg, mpc); 951 mps_death(mesg, mpc);
902 break; 952 break;
903 case CACHE_IMPOS_RCVD: 953 case CACHE_IMPOS_RCVD:
904 dprintk(" cache_impos_rcvd\n"); 954 dprintk_cont("cache_impos_rcvd\n");
905 MPOA_cache_impos_rcvd(mesg, mpc); 955 MPOA_cache_impos_rcvd(mesg, mpc);
906 break; 956 break;
907 case SET_MPC_CTRL_ADDR: 957 case SET_MPC_CTRL_ADDR:
908 dprintk(" set_mpc_ctrl_addr\n"); 958 dprintk_cont("set_mpc_ctrl_addr\n");
909 set_mpc_ctrl_addr_rcvd(mesg, mpc); 959 set_mpc_ctrl_addr_rcvd(mesg, mpc);
910 break; 960 break;
911 case SET_MPS_MAC_ADDR: 961 case SET_MPS_MAC_ADDR:
912 dprintk(" set_mps_mac_addr\n"); 962 dprintk_cont("set_mps_mac_addr\n");
913 set_mps_mac_addr_rcvd(mesg, mpc); 963 set_mps_mac_addr_rcvd(mesg, mpc);
914 break; 964 break;
915 case CLEAN_UP_AND_EXIT: 965 case CLEAN_UP_AND_EXIT:
916 dprintk(" clean_up_and_exit\n"); 966 dprintk_cont("clean_up_and_exit\n");
917 clean_up(mesg, mpc, DIE); 967 clean_up(mesg, mpc, DIE);
918 break; 968 break;
919 case RELOAD: 969 case RELOAD:
920 dprintk(" reload\n"); 970 dprintk_cont("reload\n");
921 clean_up(mesg, mpc, RELOAD); 971 clean_up(mesg, mpc, RELOAD);
922 break; 972 break;
923 case SET_MPC_PARAMS: 973 case SET_MPC_PARAMS:
924 dprintk(" set_mpc_params\n"); 974 dprintk_cont("set_mpc_params\n");
925 mpc->parameters = mesg->content.params; 975 mpc->parameters = mesg->content.params;
926 break; 976 break;
927 default: 977 default:
928 dprintk(" unknown message %d\n", mesg->type); 978 dprintk_cont("unknown message %d\n", mesg->type);
929 break; 979 break;
930 } 980 }
931 kfree_skb(skb); 981 kfree_skb(skb);
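
The switch in msg_from_mpoad() moves to dprintk_cont() because the first dprintk() now carries the prefix and the per-case text is meant to continue that same line; continuations should be marked explicitly with KERN_CONT so the pieces end up in one log record. A small sketch of the two-step pattern (names are illustrative):

    #include <linux/printk.h>

    static void report(const char *dev, int type)
    {
            printk(KERN_DEBUG "mpoa:%s: (%s)", __func__, dev);      /* prefixed start of the line */
            printk(KERN_CONT " message type %d\n", type);           /* appended to the same record */
    }
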
@@ -940,7 +990,7 @@ int msg_to_mpoad(struct k_message *mesg, struct mpoa_client *mpc)
940 struct sock *sk; 990 struct sock *sk;
941 991
942 if (mpc == NULL || !mpc->mpoad_vcc) { 992 if (mpc == NULL || !mpc->mpoad_vcc) {
943 printk("mpoa: msg_to_mpoad: mesg %d to a non-existent mpoad\n", mesg->type); 993 pr_info("mesg %d to a non-existent mpoad\n", mesg->type);
944 return -ENXIO; 994 return -ENXIO;
945 } 995 }
946 996
@@ -958,7 +1008,8 @@ int msg_to_mpoad(struct k_message *mesg, struct mpoa_client *mpc)
958 return 0; 1008 return 0;
959} 1009}
960 1010
961static int mpoa_event_listener(struct notifier_block *mpoa_notifier, unsigned long event, void *dev_ptr) 1011static int mpoa_event_listener(struct notifier_block *mpoa_notifier,
1012 unsigned long event, void *dev_ptr)
962{ 1013{
963 struct net_device *dev; 1014 struct net_device *dev;
964 struct mpoa_client *mpc; 1015 struct mpoa_client *mpc;
@@ -980,25 +1031,24 @@ static int mpoa_event_listener(struct notifier_block *mpoa_notifier, unsigned lo
980 priv->lane2_ops->associate_indicator = lane2_assoc_ind; 1031 priv->lane2_ops->associate_indicator = lane2_assoc_ind;
981 mpc = find_mpc_by_itfnum(priv->itfnum); 1032 mpc = find_mpc_by_itfnum(priv->itfnum);
982 if (mpc == NULL) { 1033 if (mpc == NULL) {
983 dprintk("mpoa: mpoa_event_listener: allocating new mpc for %s\n", 1034 dprintk("allocating new mpc for %s\n", dev->name);
984 dev->name);
985 mpc = alloc_mpc(); 1035 mpc = alloc_mpc();
986 if (mpc == NULL) { 1036 if (mpc == NULL) {
987 printk("mpoa: mpoa_event_listener: no new mpc"); 1037 pr_info("no new mpc");
988 break; 1038 break;
989 } 1039 }
990 } 1040 }
991 mpc->dev_num = priv->itfnum; 1041 mpc->dev_num = priv->itfnum;
992 mpc->dev = dev; 1042 mpc->dev = dev;
993 dev_hold(dev); 1043 dev_hold(dev);
994 dprintk("mpoa: (%s) was initialized\n", dev->name); 1044 dprintk("(%s) was initialized\n", dev->name);
995 break; 1045 break;
996 case NETDEV_UNREGISTER: 1046 case NETDEV_UNREGISTER:
997 /* the lec device was deallocated */ 1047 /* the lec device was deallocated */
998 mpc = find_mpc_by_lec(dev); 1048 mpc = find_mpc_by_lec(dev);
999 if (mpc == NULL) 1049 if (mpc == NULL)
1000 break; 1050 break;
1001 dprintk("mpoa: device (%s) was deallocated\n", dev->name); 1051 dprintk("device (%s) was deallocated\n", dev->name);
1002 stop_mpc(mpc); 1052 stop_mpc(mpc);
1003 dev_put(mpc->dev); 1053 dev_put(mpc->dev);
1004 mpc->dev = NULL; 1054 mpc->dev = NULL;
@@ -1008,9 +1058,8 @@ static int mpoa_event_listener(struct notifier_block *mpoa_notifier, unsigned lo
1008 mpc = find_mpc_by_lec(dev); 1058 mpc = find_mpc_by_lec(dev);
1009 if (mpc == NULL) 1059 if (mpc == NULL)
1010 break; 1060 break;
1011 if (mpc->mpoad_vcc != NULL) { 1061 if (mpc->mpoad_vcc != NULL)
1012 start_mpc(mpc, dev); 1062 start_mpc(mpc, dev);
1013 }
1014 break; 1063 break;
1015 case NETDEV_DOWN: 1064 case NETDEV_DOWN:
1016 /* the dev was ifconfig'ed down */ 1065 /* the dev was ifconfig'ed down */
@@ -1020,9 +1069,8 @@ static int mpoa_event_listener(struct notifier_block *mpoa_notifier, unsigned lo
1020 mpc = find_mpc_by_lec(dev); 1069 mpc = find_mpc_by_lec(dev);
1021 if (mpc == NULL) 1070 if (mpc == NULL)
1022 break; 1071 break;
1023 if (mpc->mpoad_vcc != NULL) { 1072 if (mpc->mpoad_vcc != NULL)
1024 stop_mpc(mpc); 1073 stop_mpc(mpc);
1025 }
1026 break; 1074 break;
1027 case NETDEV_REBOOT: 1075 case NETDEV_REBOOT:
1028 case NETDEV_CHANGE: 1076 case NETDEV_CHANGE:
@@ -1049,7 +1097,7 @@ static void MPOA_trigger_rcvd(struct k_message *msg, struct mpoa_client *mpc)
1049 in_cache_entry *entry; 1097 in_cache_entry *entry;
1050 1098
1051 entry = mpc->in_ops->get(dst_ip, mpc); 1099 entry = mpc->in_ops->get(dst_ip, mpc);
1052 if(entry == NULL){ 1100 if (entry == NULL) {
1053 entry = mpc->in_ops->add_entry(dst_ip, mpc); 1101 entry = mpc->in_ops->add_entry(dst_ip, mpc);
1054 entry->entry_state = INGRESS_RESOLVING; 1102 entry->entry_state = INGRESS_RESOLVING;
1055 msg->type = SND_MPOA_RES_RQST; 1103 msg->type = SND_MPOA_RES_RQST;
@@ -1060,7 +1108,7 @@ static void MPOA_trigger_rcvd(struct k_message *msg, struct mpoa_client *mpc)
1060 return; 1108 return;
1061 } 1109 }
1062 1110
1063 if(entry->entry_state == INGRESS_INVALID){ 1111 if (entry->entry_state == INGRESS_INVALID) {
1064 entry->entry_state = INGRESS_RESOLVING; 1112 entry->entry_state = INGRESS_RESOLVING;
1065 msg->type = SND_MPOA_RES_RQST; 1113 msg->type = SND_MPOA_RES_RQST;
1066 msg->content.in_info = entry->ctrl_info; 1114 msg->content.in_info = entry->ctrl_info;
@@ -1070,7 +1118,7 @@ static void MPOA_trigger_rcvd(struct k_message *msg, struct mpoa_client *mpc)
1070 return; 1118 return;
1071 } 1119 }
1072 1120
1073 printk("mpoa: (%s) MPOA_trigger_rcvd: entry already in resolving state\n", 1121 pr_info("(%s) entry already in resolving state\n",
1074 (mpc->dev) ? mpc->dev->name : "<unknown>"); 1122 (mpc->dev) ? mpc->dev->name : "<unknown>");
1075 mpc->in_ops->put(entry); 1123 mpc->in_ops->put(entry);
1076 return; 1124 return;
@@ -1080,23 +1128,25 @@ static void MPOA_trigger_rcvd(struct k_message *msg, struct mpoa_client *mpc)
1080 * Things get complicated because we have to check if there's an egress 1128 * Things get complicated because we have to check if there's an egress
1081 * shortcut with suitable traffic parameters we could use. 1129 * shortcut with suitable traffic parameters we could use.
1082 */ 1130 */
1083static void check_qos_and_open_shortcut(struct k_message *msg, struct mpoa_client *client, in_cache_entry *entry) 1131static void check_qos_and_open_shortcut(struct k_message *msg,
1132 struct mpoa_client *client,
1133 in_cache_entry *entry)
1084{ 1134{
1085 __be32 dst_ip = msg->content.in_info.in_dst_ip; 1135 __be32 dst_ip = msg->content.in_info.in_dst_ip;
1086 struct atm_mpoa_qos *qos = atm_mpoa_search_qos(dst_ip); 1136 struct atm_mpoa_qos *qos = atm_mpoa_search_qos(dst_ip);
1087 eg_cache_entry *eg_entry = client->eg_ops->get_by_src_ip(dst_ip, client); 1137 eg_cache_entry *eg_entry = client->eg_ops->get_by_src_ip(dst_ip, client);
1088 1138
1089 if(eg_entry && eg_entry->shortcut){ 1139 if (eg_entry && eg_entry->shortcut) {
1090 if(eg_entry->shortcut->qos.txtp.traffic_class & 1140 if (eg_entry->shortcut->qos.txtp.traffic_class &
1091 msg->qos.txtp.traffic_class & 1141 msg->qos.txtp.traffic_class &
1092 (qos ? qos->qos.txtp.traffic_class : ATM_UBR | ATM_CBR)){ 1142 (qos ? qos->qos.txtp.traffic_class : ATM_UBR | ATM_CBR)) {
1093 if(eg_entry->shortcut->qos.txtp.traffic_class == ATM_UBR) 1143 if (eg_entry->shortcut->qos.txtp.traffic_class == ATM_UBR)
1094 entry->shortcut = eg_entry->shortcut; 1144 entry->shortcut = eg_entry->shortcut;
1095 else if(eg_entry->shortcut->qos.txtp.max_pcr > 0) 1145 else if (eg_entry->shortcut->qos.txtp.max_pcr > 0)
1096 entry->shortcut = eg_entry->shortcut; 1146 entry->shortcut = eg_entry->shortcut;
1097 } 1147 }
1098 if(entry->shortcut){ 1148 if (entry->shortcut) {
1099 dprintk("mpoa: (%s) using egress SVC to reach %pI4\n", 1149 dprintk("(%s) using egress SVC to reach %pI4\n",
1100 client->dev->name, &dst_ip); 1150 client->dev->name, &dst_ip);
1101 client->eg_ops->put(eg_entry); 1151 client->eg_ops->put(eg_entry);
1102 return; 1152 return;
@@ -1107,12 +1157,13 @@ static void check_qos_and_open_shortcut(struct k_message *msg, struct mpoa_clien
1107 1157
1108 /* No luck in the egress cache we must open an ingress SVC */ 1158 /* No luck in the egress cache we must open an ingress SVC */
1109 msg->type = OPEN_INGRESS_SVC; 1159 msg->type = OPEN_INGRESS_SVC;
1110 if (qos && (qos->qos.txtp.traffic_class == msg->qos.txtp.traffic_class)) 1160 if (qos &&
1111 { 1161 (qos->qos.txtp.traffic_class == msg->qos.txtp.traffic_class)) {
1112 msg->qos = qos->qos; 1162 msg->qos = qos->qos;
1113 printk("mpoa: (%s) trying to get a CBR shortcut\n",client->dev->name); 1163 pr_info("(%s) trying to get a CBR shortcut\n",
1114 } 1164 client->dev->name);
1115 else memset(&msg->qos,0,sizeof(struct atm_qos)); 1165 } else
1166 memset(&msg->qos, 0, sizeof(struct atm_qos));
1116 msg_to_mpoad(msg, client); 1167 msg_to_mpoad(msg, client);
1117 return; 1168 return;
1118} 1169}
@@ -1122,17 +1173,19 @@ static void MPOA_res_reply_rcvd(struct k_message *msg, struct mpoa_client *mpc)
1122 __be32 dst_ip = msg->content.in_info.in_dst_ip; 1173 __be32 dst_ip = msg->content.in_info.in_dst_ip;
1123 in_cache_entry *entry = mpc->in_ops->get(dst_ip, mpc); 1174 in_cache_entry *entry = mpc->in_ops->get(dst_ip, mpc);
1124 1175
1125 dprintk("mpoa: (%s) MPOA_res_reply_rcvd: ip %pI4\n", 1176 dprintk("(%s) ip %pI4\n",
1126 mpc->dev->name, &dst_ip); 1177 mpc->dev->name, &dst_ip);
1127 ddprintk("mpoa: (%s) MPOA_res_reply_rcvd() entry = %p", mpc->dev->name, entry); 1178 ddprintk("(%s) entry = %p",
1128 if(entry == NULL){ 1179 mpc->dev->name, entry);
1129 printk("\nmpoa: (%s) ARGH, received res. reply for an entry that doesn't exist.\n", mpc->dev->name); 1180 if (entry == NULL) {
1181 pr_info("(%s) ARGH, received res. reply for an entry that doesn't exist.\n",
1182 mpc->dev->name);
1130 return; 1183 return;
1131 } 1184 }
1132 ddprintk(" entry_state = %d ", entry->entry_state); 1185 ddprintk_cont(" entry_state = %d ", entry->entry_state);
1133 1186
1134 if (entry->entry_state == INGRESS_RESOLVED) { 1187 if (entry->entry_state == INGRESS_RESOLVED) {
1135 printk("\nmpoa: (%s) MPOA_res_reply_rcvd for RESOLVED entry!\n", mpc->dev->name); 1188 pr_info("(%s) RESOLVED entry!\n", mpc->dev->name);
1136 mpc->in_ops->put(entry); 1189 mpc->in_ops->put(entry);
1137 return; 1190 return;
1138 } 1191 }
@@ -1141,17 +1194,18 @@ static void MPOA_res_reply_rcvd(struct k_message *msg, struct mpoa_client *mpc)
1141 do_gettimeofday(&(entry->tv)); 1194 do_gettimeofday(&(entry->tv));
1142 do_gettimeofday(&(entry->reply_wait)); /* Used in refreshing func from now on */ 1195 do_gettimeofday(&(entry->reply_wait)); /* Used in refreshing func from now on */
1143 entry->refresh_time = 0; 1196 entry->refresh_time = 0;
1144 ddprintk("entry->shortcut = %p\n", entry->shortcut); 1197 ddprintk_cont("entry->shortcut = %p\n", entry->shortcut);
1145 1198
1146 if(entry->entry_state == INGRESS_RESOLVING && entry->shortcut != NULL){ 1199 if (entry->entry_state == INGRESS_RESOLVING &&
1200 entry->shortcut != NULL) {
1147 entry->entry_state = INGRESS_RESOLVED; 1201 entry->entry_state = INGRESS_RESOLVED;
1148 mpc->in_ops->put(entry); 1202 mpc->in_ops->put(entry);
1149 return; /* Shortcut already open... */ 1203 return; /* Shortcut already open... */
1150 } 1204 }
1151 1205
1152 if (entry->shortcut != NULL) { 1206 if (entry->shortcut != NULL) {
1153 printk("mpoa: (%s) MPOA_res_reply_rcvd: entry->shortcut != NULL, impossible!\n", 1207 pr_info("(%s) entry->shortcut != NULL, impossible!\n",
1154 mpc->dev->name); 1208 mpc->dev->name);
1155 mpc->in_ops->put(entry); 1209 mpc->in_ops->put(entry);
1156 return; 1210 return;
1157 } 1211 }
@@ -1170,14 +1224,14 @@ static void ingress_purge_rcvd(struct k_message *msg, struct mpoa_client *mpc)
1170 __be32 mask = msg->ip_mask; 1224 __be32 mask = msg->ip_mask;
1171 in_cache_entry *entry = mpc->in_ops->get_with_mask(dst_ip, mpc, mask); 1225 in_cache_entry *entry = mpc->in_ops->get_with_mask(dst_ip, mpc, mask);
1172 1226
1173 if(entry == NULL){ 1227 if (entry == NULL) {
1174 printk("mpoa: (%s) ingress_purge_rcvd: purge for a non-existing entry, ip = %pI4\n", 1228 pr_info("(%s) purge for a non-existing entry, ip = %pI4\n",
1175 mpc->dev->name, &dst_ip); 1229 mpc->dev->name, &dst_ip);
1176 return; 1230 return;
1177 } 1231 }
1178 1232
1179 do { 1233 do {
1180 dprintk("mpoa: (%s) ingress_purge_rcvd: removing an ingress entry, ip = %pI4\n", 1234 dprintk("(%s) removing an ingress entry, ip = %pI4\n",
1181 mpc->dev->name, &dst_ip); 1235 mpc->dev->name, &dst_ip);
1182 write_lock_bh(&mpc->ingress_lock); 1236 write_lock_bh(&mpc->ingress_lock);
1183 mpc->in_ops->remove_entry(entry, mpc); 1237 mpc->in_ops->remove_entry(entry, mpc);
@@ -1195,7 +1249,8 @@ static void egress_purge_rcvd(struct k_message *msg, struct mpoa_client *mpc)
1195 eg_cache_entry *entry = mpc->eg_ops->get_by_cache_id(cache_id, mpc); 1249 eg_cache_entry *entry = mpc->eg_ops->get_by_cache_id(cache_id, mpc);
1196 1250
1197 if (entry == NULL) { 1251 if (entry == NULL) {
1198 dprintk("mpoa: (%s) egress_purge_rcvd: purge for a non-existing entry\n", mpc->dev->name); 1252 dprintk("(%s) purge for a non-existing entry\n",
1253 mpc->dev->name);
1199 return; 1254 return;
1200 } 1255 }
1201 1256
@@ -1214,15 +1269,15 @@ static void purge_egress_shortcut(struct atm_vcc *vcc, eg_cache_entry *entry)
1214 struct k_message *purge_msg; 1269 struct k_message *purge_msg;
1215 struct sk_buff *skb; 1270 struct sk_buff *skb;
1216 1271
1217 dprintk("mpoa: purge_egress_shortcut: entering\n"); 1272 dprintk("entering\n");
1218 if (vcc == NULL) { 1273 if (vcc == NULL) {
1219 printk("mpoa: purge_egress_shortcut: vcc == NULL\n"); 1274 pr_info("vcc == NULL\n");
1220 return; 1275 return;
1221 } 1276 }
1222 1277
1223 skb = alloc_skb(sizeof(struct k_message), GFP_ATOMIC); 1278 skb = alloc_skb(sizeof(struct k_message), GFP_ATOMIC);
1224 if (skb == NULL) { 1279 if (skb == NULL) {
1225 printk("mpoa: purge_egress_shortcut: out of memory\n"); 1280 pr_info("out of memory\n");
1226 return; 1281 return;
1227 } 1282 }
1228 1283
@@ -1238,7 +1293,7 @@ static void purge_egress_shortcut(struct atm_vcc *vcc, eg_cache_entry *entry)
1238 sk = sk_atm(vcc); 1293 sk = sk_atm(vcc);
1239 skb_queue_tail(&sk->sk_receive_queue, skb); 1294 skb_queue_tail(&sk->sk_receive_queue, skb);
1240 sk->sk_data_ready(sk, skb->len); 1295 sk->sk_data_ready(sk, skb->len);
1241 dprintk("mpoa: purge_egress_shortcut: exiting:\n"); 1296 dprintk("exiting\n");
1242 1297
1243 return; 1298 return;
1244} 1299}
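
purge_egress_shortcut() shows the kernel-to-daemon path used throughout this file: build a k_message in a fresh skb, queue it on the mpoad socket and kick sk_data_ready(). GFP_ATOMIC suggests the caller may be on a receive path where sleeping is not allowed; that is an assumption about the calling context, not something the diff states. A condensed sketch of the same sequence with a hypothetical message struct:

    #include <linux/errno.h>
    #include <linux/skbuff.h>
    #include <net/sock.h>

    struct demo_msg {
            unsigned int type;
    };

    static int queue_msg_to_daemon(struct sock *sk, unsigned int type)
    {
            struct sk_buff *skb = alloc_skb(sizeof(struct demo_msg), GFP_ATOMIC);
            struct demo_msg *msg;

            if (!skb)
                    return -ENOMEM;

            msg = (struct demo_msg *)skb_put(skb, sizeof(*msg));    /* reserve payload space */
            msg->type = type;

            skb_queue_tail(&sk->sk_receive_queue, skb);
            sk->sk_data_ready(sk, skb->len);        /* two-argument form, as in this kernel */
            return 0;
    }
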
@@ -1247,14 +1302,14 @@ static void purge_egress_shortcut(struct atm_vcc *vcc, eg_cache_entry *entry)
1247 * Our MPS died. Tell our daemon to send NHRP data plane purge to each 1302 * Our MPS died. Tell our daemon to send NHRP data plane purge to each
1248 * of the egress shortcuts we have. 1303 * of the egress shortcuts we have.
1249 */ 1304 */
1250static void mps_death( struct k_message * msg, struct mpoa_client * mpc ) 1305static void mps_death(struct k_message *msg, struct mpoa_client *mpc)
1251{ 1306{
1252 eg_cache_entry *entry; 1307 eg_cache_entry *entry;
1253 1308
1254 dprintk("mpoa: (%s) mps_death:\n", mpc->dev->name); 1309 dprintk("(%s)\n", mpc->dev->name);
1255 1310
1256 if(memcmp(msg->MPS_ctrl, mpc->mps_ctrl_addr, ATM_ESA_LEN)){ 1311 if (memcmp(msg->MPS_ctrl, mpc->mps_ctrl_addr, ATM_ESA_LEN)) {
1257 printk("mpoa: (%s) mps_death: wrong MPS\n", mpc->dev->name); 1312 pr_info("(%s) wrong MPS\n", mpc->dev->name);
1258 return; 1313 return;
1259 } 1314 }
1260 1315
@@ -1273,20 +1328,21 @@ static void mps_death( struct k_message * msg, struct mpoa_client * mpc )
1273 return; 1328 return;
1274} 1329}
1275 1330
1276static void MPOA_cache_impos_rcvd( struct k_message * msg, struct mpoa_client * mpc) 1331static void MPOA_cache_impos_rcvd(struct k_message *msg,
1332 struct mpoa_client *mpc)
1277{ 1333{
1278 uint16_t holding_time; 1334 uint16_t holding_time;
1279 eg_cache_entry *entry = mpc->eg_ops->get_by_cache_id(msg->content.eg_info.cache_id, mpc); 1335 eg_cache_entry *entry = mpc->eg_ops->get_by_cache_id(msg->content.eg_info.cache_id, mpc);
1280 1336
1281 holding_time = msg->content.eg_info.holding_time; 1337 holding_time = msg->content.eg_info.holding_time;
1282 dprintk("mpoa: (%s) MPOA_cache_impos_rcvd: entry = %p, holding_time = %u\n", 1338 dprintk("(%s) entry = %p, holding_time = %u\n",
1283 mpc->dev->name, entry, holding_time); 1339 mpc->dev->name, entry, holding_time);
1284 if(entry == NULL && holding_time) { 1340 if (entry == NULL && holding_time) {
1285 entry = mpc->eg_ops->add_entry(msg, mpc); 1341 entry = mpc->eg_ops->add_entry(msg, mpc);
1286 mpc->eg_ops->put(entry); 1342 mpc->eg_ops->put(entry);
1287 return; 1343 return;
1288 } 1344 }
1289 if(holding_time){ 1345 if (holding_time) {
1290 mpc->eg_ops->update(entry, holding_time); 1346 mpc->eg_ops->update(entry, holding_time);
1291 return; 1347 return;
1292 } 1348 }
@@ -1300,7 +1356,8 @@ static void MPOA_cache_impos_rcvd( struct k_message * msg, struct mpoa_client *
1300 return; 1356 return;
1301} 1357}
1302 1358
1303static void set_mpc_ctrl_addr_rcvd(struct k_message *mesg, struct mpoa_client *mpc) 1359static void set_mpc_ctrl_addr_rcvd(struct k_message *mesg,
1360 struct mpoa_client *mpc)
1304{ 1361{
1305 struct lec_priv *priv; 1362 struct lec_priv *priv;
1306 int i, retval ; 1363 int i, retval ;
@@ -1315,34 +1372,39 @@ static void set_mpc_ctrl_addr_rcvd(struct k_message *mesg, struct mpoa_client *m
1315 memcpy(&tlv[7], mesg->MPS_ctrl, ATM_ESA_LEN); /* MPC ctrl ATM addr */ 1372 memcpy(&tlv[7], mesg->MPS_ctrl, ATM_ESA_LEN); /* MPC ctrl ATM addr */
1316 memcpy(mpc->our_ctrl_addr, mesg->MPS_ctrl, ATM_ESA_LEN); 1373 memcpy(mpc->our_ctrl_addr, mesg->MPS_ctrl, ATM_ESA_LEN);
1317 1374
1318 dprintk("mpoa: (%s) setting MPC ctrl ATM address to ", 1375 dprintk("(%s) setting MPC ctrl ATM address to",
1319 (mpc->dev) ? mpc->dev->name : "<unknown>"); 1376 mpc->dev ? mpc->dev->name : "<unknown>");
1320 for (i = 7; i < sizeof(tlv); i++) 1377 for (i = 7; i < sizeof(tlv); i++)
1321 dprintk("%02x ", tlv[i]); 1378 dprintk_cont(" %02x", tlv[i]);
1322 dprintk("\n"); 1379 dprintk_cont("\n");
1323 1380
1324 if (mpc->dev) { 1381 if (mpc->dev) {
1325 priv = netdev_priv(mpc->dev); 1382 priv = netdev_priv(mpc->dev);
1326 retval = priv->lane2_ops->associate_req(mpc->dev, mpc->dev->dev_addr, tlv, sizeof(tlv)); 1383 retval = priv->lane2_ops->associate_req(mpc->dev,
1384 mpc->dev->dev_addr,
1385 tlv, sizeof(tlv));
1327 if (retval == 0) 1386 if (retval == 0)
1328 printk("mpoa: (%s) MPOA device type TLV association failed\n", mpc->dev->name); 1387 pr_info("(%s) MPOA device type TLV association failed\n",
1388 mpc->dev->name);
1329 retval = priv->lane2_ops->resolve(mpc->dev, NULL, 1, NULL, NULL); 1389 retval = priv->lane2_ops->resolve(mpc->dev, NULL, 1, NULL, NULL);
1330 if (retval < 0) 1390 if (retval < 0)
1331 printk("mpoa: (%s) targetless LE_ARP request failed\n", mpc->dev->name); 1391 pr_info("(%s) targetless LE_ARP request failed\n",
1392 mpc->dev->name);
1332 } 1393 }
1333 1394
1334 return; 1395 return;
1335} 1396}
1336 1397
1337static void set_mps_mac_addr_rcvd(struct k_message *msg, struct mpoa_client *client) 1398static void set_mps_mac_addr_rcvd(struct k_message *msg,
1399 struct mpoa_client *client)
1338{ 1400{
1339 1401
1340 if(client->number_of_mps_macs) 1402 if (client->number_of_mps_macs)
1341 kfree(client->mps_macs); 1403 kfree(client->mps_macs);
1342 client->number_of_mps_macs = 0; 1404 client->number_of_mps_macs = 0;
1343 client->mps_macs = kmemdup(msg->MPS_ctrl, ETH_ALEN, GFP_KERNEL); 1405 client->mps_macs = kmemdup(msg->MPS_ctrl, ETH_ALEN, GFP_KERNEL);
1344 if (client->mps_macs == NULL) { 1406 if (client->mps_macs == NULL) {
1345 printk("mpoa: set_mps_mac_addr_rcvd: out of memory\n"); 1407 pr_info("out of memory\n");
1346 return; 1408 return;
1347 } 1409 }
1348 client->number_of_mps_macs = 1; 1410 client->number_of_mps_macs = 1;
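
set_mps_mac_addr_rcvd(), visible as context in the hunk above, already relies on kmemdup(), which replaces an open-coded kmalloc()-plus-memcpy() pair and returns NULL on allocation failure just like kmalloc(). A minimal sketch (ETH_ALEN is the usual 6-byte MAC length):

    #include <linux/if_ether.h>
    #include <linux/slab.h>
    #include <linux/string.h>

    static u8 *dup_mac(const u8 *mac)
    {
            return kmemdup(mac, ETH_ALEN, GFP_KERNEL);      /* NULL on failure */
    }
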
@@ -1363,11 +1425,11 @@ static void clean_up(struct k_message *msg, struct mpoa_client *mpc, int action)
1363 /* FIXME: This knows too much of the cache structure */ 1425 /* FIXME: This knows too much of the cache structure */
1364 read_lock_irq(&mpc->egress_lock); 1426 read_lock_irq(&mpc->egress_lock);
1365 entry = mpc->eg_cache; 1427 entry = mpc->eg_cache;
1366 while (entry != NULL){ 1428 while (entry != NULL) {
1367 msg->content.eg_info = entry->ctrl_info; 1429 msg->content.eg_info = entry->ctrl_info;
1368 dprintk("mpoa: cache_id %u\n", entry->ctrl_info.cache_id); 1430 dprintk("cache_id %u\n", entry->ctrl_info.cache_id);
1369 msg_to_mpoad(msg, mpc); 1431 msg_to_mpoad(msg, mpc);
1370 entry = entry->next; 1432 entry = entry->next;
1371 } 1433 }
1372 read_unlock_irq(&mpc->egress_lock); 1434 read_unlock_irq(&mpc->egress_lock);
1373 1435
@@ -1386,20 +1448,22 @@ static void mpc_timer_refresh(void)
1386 return; 1448 return;
1387} 1449}
1388 1450
1389static void mpc_cache_check( unsigned long checking_time ) 1451static void mpc_cache_check(unsigned long checking_time)
1390{ 1452{
1391 struct mpoa_client *mpc = mpcs; 1453 struct mpoa_client *mpc = mpcs;
1392 static unsigned long previous_resolving_check_time; 1454 static unsigned long previous_resolving_check_time;
1393 static unsigned long previous_refresh_time; 1455 static unsigned long previous_refresh_time;
1394 1456
1395 while( mpc != NULL ){ 1457 while (mpc != NULL) {
1396 mpc->in_ops->clear_count(mpc); 1458 mpc->in_ops->clear_count(mpc);
1397 mpc->eg_ops->clear_expired(mpc); 1459 mpc->eg_ops->clear_expired(mpc);
1398 if(checking_time - previous_resolving_check_time > mpc->parameters.mpc_p4 * HZ ){ 1460 if (checking_time - previous_resolving_check_time >
1461 mpc->parameters.mpc_p4 * HZ) {
1399 mpc->in_ops->check_resolving(mpc); 1462 mpc->in_ops->check_resolving(mpc);
1400 previous_resolving_check_time = checking_time; 1463 previous_resolving_check_time = checking_time;
1401 } 1464 }
1402 if(checking_time - previous_refresh_time > mpc->parameters.mpc_p5 * HZ ){ 1465 if (checking_time - previous_refresh_time >
1466 mpc->parameters.mpc_p5 * HZ) {
1403 mpc->in_ops->refresh(mpc); 1467 mpc->in_ops->refresh(mpc);
1404 previous_refresh_time = checking_time; 1468 previous_refresh_time = checking_time;
1405 } 1469 }
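
mpc_cache_check() compares jiffies-based timestamps by unsigned subtraction against an interval in HZ units, which stays correct across counter wraparound; the time_after() helpers express the same check more explicitly. A small sketch (helper name is made up):

    #include <linux/jiffies.h>
    #include <linux/types.h>

    /* true once at least 'interval' jiffies have passed since 'last' */
    static bool interval_elapsed(unsigned long last, unsigned long interval)
    {
            return time_after(jiffies, last + interval);
    }
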
@@ -1410,7 +1474,8 @@ static void mpc_cache_check( unsigned long checking_time )
1410 return; 1474 return;
1411} 1475}
1412 1476
1413static int atm_mpoa_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) 1477static int atm_mpoa_ioctl(struct socket *sock, unsigned int cmd,
1478 unsigned long arg)
1414{ 1479{
1415 int err = 0; 1480 int err = 0;
1416 struct atm_vcc *vcc = ATM_SD(sock); 1481 struct atm_vcc *vcc = ATM_SD(sock);
@@ -1422,21 +1487,20 @@ static int atm_mpoa_ioctl(struct socket *sock, unsigned int cmd, unsigned long a
1422 return -EPERM; 1487 return -EPERM;
1423 1488
1424 switch (cmd) { 1489 switch (cmd) {
1425 case ATMMPC_CTRL: 1490 case ATMMPC_CTRL:
1426 err = atm_mpoa_mpoad_attach(vcc, (int)arg); 1491 err = atm_mpoa_mpoad_attach(vcc, (int)arg);
1427 if (err >= 0) 1492 if (err >= 0)
1428 sock->state = SS_CONNECTED; 1493 sock->state = SS_CONNECTED;
1429 break; 1494 break;
1430 case ATMMPC_DATA: 1495 case ATMMPC_DATA:
1431 err = atm_mpoa_vcc_attach(vcc, (void __user *)arg); 1496 err = atm_mpoa_vcc_attach(vcc, (void __user *)arg);
1432 break; 1497 break;
1433 default: 1498 default:
1434 break; 1499 break;
1435 } 1500 }
1436 return err; 1501 return err;
1437} 1502}
1438 1503
1439
1440static struct atm_ioctl atm_ioctl_ops = { 1504static struct atm_ioctl atm_ioctl_ops = {
1441 .owner = THIS_MODULE, 1505 .owner = THIS_MODULE,
1442 .ioctl = atm_mpoa_ioctl, 1506 .ioctl = atm_mpoa_ioctl,
@@ -1447,9 +1511,9 @@ static __init int atm_mpoa_init(void)
1447 register_atm_ioctl(&atm_ioctl_ops); 1511 register_atm_ioctl(&atm_ioctl_ops);
1448 1512
1449 if (mpc_proc_init() != 0) 1513 if (mpc_proc_init() != 0)
1450 printk(KERN_INFO "mpoa: failed to initialize /proc/mpoa\n"); 1514 pr_info("failed to initialize /proc/mpoa\n");
1451 1515
1452 printk("mpc.c: " __DATE__ " " __TIME__ " initialized\n"); 1516 pr_info("mpc.c: " __DATE__ " " __TIME__ " initialized\n");
1453 1517
1454 return 0; 1518 return 0;
1455} 1519}
@@ -1476,15 +1540,15 @@ static void __exit atm_mpoa_cleanup(void)
1476 if (priv->lane2_ops != NULL) 1540 if (priv->lane2_ops != NULL)
1477 priv->lane2_ops->associate_indicator = NULL; 1541 priv->lane2_ops->associate_indicator = NULL;
1478 } 1542 }
1479 ddprintk("mpoa: cleanup_module: about to clear caches\n"); 1543 ddprintk("about to clear caches\n");
1480 mpc->in_ops->destroy_cache(mpc); 1544 mpc->in_ops->destroy_cache(mpc);
1481 mpc->eg_ops->destroy_cache(mpc); 1545 mpc->eg_ops->destroy_cache(mpc);
1482 ddprintk("mpoa: cleanup_module: caches cleared\n"); 1546 ddprintk("caches cleared\n");
1483 kfree(mpc->mps_macs); 1547 kfree(mpc->mps_macs);
1484 memset(mpc, 0, sizeof(struct mpoa_client)); 1548 memset(mpc, 0, sizeof(struct mpoa_client));
1485 ddprintk("mpoa: cleanup_module: about to kfree %p\n", mpc); 1549 ddprintk("about to kfree %p\n", mpc);
1486 kfree(mpc); 1550 kfree(mpc);
1487 ddprintk("mpoa: cleanup_module: next mpc is at %p\n", tmp); 1551 ddprintk("next mpc is at %p\n", tmp);
1488 mpc = tmp; 1552 mpc = tmp;
1489 } 1553 }
1490 1554
@@ -1492,7 +1556,7 @@ static void __exit atm_mpoa_cleanup(void)
1492 qos_head = NULL; 1556 qos_head = NULL;
1493 while (qos != NULL) { 1557 while (qos != NULL) {
1494 nextqos = qos->next; 1558 nextqos = qos->next;
1495 dprintk("mpoa: cleanup_module: freeing qos entry %p\n", qos); 1559 dprintk("freeing qos entry %p\n", qos);
1496 kfree(qos); 1560 kfree(qos);
1497 qos = nextqos; 1561 qos = nextqos;
1498 } 1562 }
diff --git a/net/atm/mpoa_caches.c b/net/atm/mpoa_caches.c
index 4504a4b339b..4c141810eb6 100644
--- a/net/atm/mpoa_caches.c
+++ b/net/atm/mpoa_caches.c
@@ -11,15 +11,23 @@
11 */ 11 */
12 12
13#if 0 13#if 0
14#define dprintk printk /* debug */ 14#define dprintk(format, args...) \
15 printk(KERN_DEBUG "mpoa:%s: " format, __FILE__, ##args) /* debug */
15#else 16#else
16#define dprintk(format,args...) 17#define dprintk(format, args...) \
18 do { if (0) \
19 printk(KERN_DEBUG "mpoa:%s: " format, __FILE__, ##args);\
20 } while (0)
17#endif 21#endif
18 22
19#if 0 23#if 0
20#define ddprintk printk /* more debug */ 24#define ddprintk(format, args...) \
25 printk(KERN_DEBUG "mpoa:%s: " format, __FILE__, ##args) /* debug */
21#else 26#else
22#define ddprintk(format,args...) 27#define ddprintk(format, args...) \
28 do { if (0) \
29 printk(KERN_DEBUG "mpoa:%s: " format, __FILE__, ##args);\
30 } while (0)
23#endif 31#endif
24 32
25static in_cache_entry *in_cache_get(__be32 dst_ip, 33static in_cache_entry *in_cache_get(__be32 dst_ip,
@@ -29,8 +37,8 @@ static in_cache_entry *in_cache_get(__be32 dst_ip,
29 37
30 read_lock_bh(&client->ingress_lock); 38 read_lock_bh(&client->ingress_lock);
31 entry = client->in_cache; 39 entry = client->in_cache;
32 while(entry != NULL){ 40 while (entry != NULL) {
33 if( entry->ctrl_info.in_dst_ip == dst_ip ){ 41 if (entry->ctrl_info.in_dst_ip == dst_ip) {
34 atomic_inc(&entry->use); 42 atomic_inc(&entry->use);
35 read_unlock_bh(&client->ingress_lock); 43 read_unlock_bh(&client->ingress_lock);
36 return entry; 44 return entry;
@@ -50,8 +58,8 @@ static in_cache_entry *in_cache_get_with_mask(__be32 dst_ip,
50 58
51 read_lock_bh(&client->ingress_lock); 59 read_lock_bh(&client->ingress_lock);
52 entry = client->in_cache; 60 entry = client->in_cache;
53 while(entry != NULL){ 61 while (entry != NULL) {
54 if((entry->ctrl_info.in_dst_ip & mask) == (dst_ip & mask )){ 62 if ((entry->ctrl_info.in_dst_ip & mask) == (dst_ip & mask)) {
55 atomic_inc(&entry->use); 63 atomic_inc(&entry->use);
56 read_unlock_bh(&client->ingress_lock); 64 read_unlock_bh(&client->ingress_lock);
57 return entry; 65 return entry;
@@ -65,14 +73,14 @@ static in_cache_entry *in_cache_get_with_mask(__be32 dst_ip,
65} 73}
66 74
67static in_cache_entry *in_cache_get_by_vcc(struct atm_vcc *vcc, 75static in_cache_entry *in_cache_get_by_vcc(struct atm_vcc *vcc,
68 struct mpoa_client *client ) 76 struct mpoa_client *client)
69{ 77{
70 in_cache_entry *entry; 78 in_cache_entry *entry;
71 79
72 read_lock_bh(&client->ingress_lock); 80 read_lock_bh(&client->ingress_lock);
73 entry = client->in_cache; 81 entry = client->in_cache;
74 while(entry != NULL){ 82 while (entry != NULL) {
75 if(entry->shortcut == vcc) { 83 if (entry->shortcut == vcc) {
76 atomic_inc(&entry->use); 84 atomic_inc(&entry->use);
77 read_unlock_bh(&client->ingress_lock); 85 read_unlock_bh(&client->ingress_lock);
78 return entry; 86 return entry;
@@ -90,14 +98,14 @@ static in_cache_entry *in_cache_add_entry(__be32 dst_ip,
90 in_cache_entry *entry = kzalloc(sizeof(in_cache_entry), GFP_KERNEL); 98 in_cache_entry *entry = kzalloc(sizeof(in_cache_entry), GFP_KERNEL);
91 99
92 if (entry == NULL) { 100 if (entry == NULL) {
93 printk("mpoa: mpoa_caches.c: new_in_cache_entry: out of memory\n"); 101 pr_info("mpoa: mpoa_caches.c: new_in_cache_entry: out of memory\n");
94 return NULL; 102 return NULL;
95 } 103 }
96 104
97 dprintk("mpoa: mpoa_caches.c: adding an ingress entry, ip = %pI4\n", &dst_ip); 105 dprintk("adding an ingress entry, ip = %pI4\n", &dst_ip);
98 106
99 atomic_set(&entry->use, 1); 107 atomic_set(&entry->use, 1);
100 dprintk("mpoa: mpoa_caches.c: new_in_cache_entry: about to lock\n"); 108 dprintk("new_in_cache_entry: about to lock\n");
101 write_lock_bh(&client->ingress_lock); 109 write_lock_bh(&client->ingress_lock);
102 entry->next = client->in_cache; 110 entry->next = client->in_cache;
103 entry->prev = NULL; 111 entry->prev = NULL;
@@ -115,7 +123,7 @@ static in_cache_entry *in_cache_add_entry(__be32 dst_ip,
115 atomic_inc(&entry->use); 123 atomic_inc(&entry->use);
116 124
117 write_unlock_bh(&client->ingress_lock); 125 write_unlock_bh(&client->ingress_lock);
118 dprintk("mpoa: mpoa_caches.c: new_in_cache_entry: unlocked\n"); 126 dprintk("new_in_cache_entry: unlocked\n");
119 127
120 return entry; 128 return entry;
121} 129}
@@ -126,39 +134,41 @@ static int cache_hit(in_cache_entry *entry, struct mpoa_client *mpc)
126 struct k_message msg; 134 struct k_message msg;
127 135
128 entry->count++; 136 entry->count++;
129 if(entry->entry_state == INGRESS_RESOLVED && entry->shortcut != NULL) 137 if (entry->entry_state == INGRESS_RESOLVED && entry->shortcut != NULL)
130 return OPEN; 138 return OPEN;
131 139
132 if(entry->entry_state == INGRESS_REFRESHING){ 140 if (entry->entry_state == INGRESS_REFRESHING) {
133 if(entry->count > mpc->parameters.mpc_p1){ 141 if (entry->count > mpc->parameters.mpc_p1) {
134 msg.type = SND_MPOA_RES_RQST; 142 msg.type = SND_MPOA_RES_RQST;
135 msg.content.in_info = entry->ctrl_info; 143 msg.content.in_info = entry->ctrl_info;
136 memcpy(msg.MPS_ctrl, mpc->mps_ctrl_addr, ATM_ESA_LEN); 144 memcpy(msg.MPS_ctrl, mpc->mps_ctrl_addr, ATM_ESA_LEN);
137 qos = atm_mpoa_search_qos(entry->ctrl_info.in_dst_ip); 145 qos = atm_mpoa_search_qos(entry->ctrl_info.in_dst_ip);
138 if (qos != NULL) msg.qos = qos->qos; 146 if (qos != NULL)
147 msg.qos = qos->qos;
139 msg_to_mpoad(&msg, mpc); 148 msg_to_mpoad(&msg, mpc);
140 do_gettimeofday(&(entry->reply_wait)); 149 do_gettimeofday(&(entry->reply_wait));
141 entry->entry_state = INGRESS_RESOLVING; 150 entry->entry_state = INGRESS_RESOLVING;
142 } 151 }
143 if(entry->shortcut != NULL) 152 if (entry->shortcut != NULL)
144 return OPEN; 153 return OPEN;
145 return CLOSED; 154 return CLOSED;
146 } 155 }
147 156
148 if(entry->entry_state == INGRESS_RESOLVING && entry->shortcut != NULL) 157 if (entry->entry_state == INGRESS_RESOLVING && entry->shortcut != NULL)
149 return OPEN; 158 return OPEN;
150 159
151 if( entry->count > mpc->parameters.mpc_p1 && 160 if (entry->count > mpc->parameters.mpc_p1 &&
152 entry->entry_state == INGRESS_INVALID){ 161 entry->entry_state == INGRESS_INVALID) {
153 dprintk("mpoa: (%s) mpoa_caches.c: threshold exceeded for ip %pI4, sending MPOA res req\n", 162 dprintk("(%s) threshold exceeded for ip %pI4, sending MPOA res req\n",
154 mpc->dev->name, &entry->ctrl_info.in_dst_ip); 163 mpc->dev->name, &entry->ctrl_info.in_dst_ip);
155 entry->entry_state = INGRESS_RESOLVING; 164 entry->entry_state = INGRESS_RESOLVING;
156 msg.type = SND_MPOA_RES_RQST; 165 msg.type = SND_MPOA_RES_RQST;
157 memcpy(msg.MPS_ctrl, mpc->mps_ctrl_addr, ATM_ESA_LEN ); 166 memcpy(msg.MPS_ctrl, mpc->mps_ctrl_addr, ATM_ESA_LEN);
158 msg.content.in_info = entry->ctrl_info; 167 msg.content.in_info = entry->ctrl_info;
159 qos = atm_mpoa_search_qos(entry->ctrl_info.in_dst_ip); 168 qos = atm_mpoa_search_qos(entry->ctrl_info.in_dst_ip);
160 if (qos != NULL) msg.qos = qos->qos; 169 if (qos != NULL)
161 msg_to_mpoad( &msg, mpc); 170 msg.qos = qos->qos;
171 msg_to_mpoad(&msg, mpc);
162 do_gettimeofday(&(entry->reply_wait)); 172 do_gettimeofday(&(entry->reply_wait));
163 } 173 }
164 174
@@ -185,7 +195,7 @@ static void in_cache_remove_entry(in_cache_entry *entry,
185 struct k_message msg; 195 struct k_message msg;
186 196
187 vcc = entry->shortcut; 197 vcc = entry->shortcut;
188 dprintk("mpoa: mpoa_caches.c: removing an ingress entry, ip = %pI4\n", 198 dprintk("removing an ingress entry, ip = %pI4\n",
189 &entry->ctrl_info.in_dst_ip); 199 &entry->ctrl_info.in_dst_ip);
190 200
191 if (entry->prev != NULL) 201 if (entry->prev != NULL)
@@ -195,14 +205,15 @@ static void in_cache_remove_entry(in_cache_entry *entry,
195 if (entry->next != NULL) 205 if (entry->next != NULL)
196 entry->next->prev = entry->prev; 206 entry->next->prev = entry->prev;
197 client->in_ops->put(entry); 207 client->in_ops->put(entry);
198 if(client->in_cache == NULL && client->eg_cache == NULL){ 208 if (client->in_cache == NULL && client->eg_cache == NULL) {
199 msg.type = STOP_KEEP_ALIVE_SM; 209 msg.type = STOP_KEEP_ALIVE_SM;
200 msg_to_mpoad(&msg,client); 210 msg_to_mpoad(&msg, client);
201 } 211 }
202 212
203 /* Check if the egress side still uses this VCC */ 213 /* Check if the egress side still uses this VCC */
204 if (vcc != NULL) { 214 if (vcc != NULL) {
205 eg_cache_entry *eg_entry = client->eg_ops->get_by_vcc(vcc, client); 215 eg_cache_entry *eg_entry = client->eg_ops->get_by_vcc(vcc,
216 client);
206 if (eg_entry != NULL) { 217 if (eg_entry != NULL) {
207 client->eg_ops->put(eg_entry); 218 client->eg_ops->put(eg_entry);
208 return; 219 return;
@@ -213,7 +224,6 @@ static void in_cache_remove_entry(in_cache_entry *entry,
213 return; 224 return;
214} 225}
215 226
216
217/* Call this every MPC-p2 seconds... Not exactly correct solution, 227/* Call this every MPC-p2 seconds... Not exactly correct solution,
218 but an easy one... */ 228 but an easy one... */
219static void clear_count_and_expired(struct mpoa_client *client) 229static void clear_count_and_expired(struct mpoa_client *client)
@@ -225,12 +235,12 @@ static void clear_count_and_expired(struct mpoa_client *client)
225 235
226 write_lock_bh(&client->ingress_lock); 236 write_lock_bh(&client->ingress_lock);
227 entry = client->in_cache; 237 entry = client->in_cache;
228 while(entry != NULL){ 238 while (entry != NULL) {
229 entry->count=0; 239 entry->count = 0;
230 next_entry = entry->next; 240 next_entry = entry->next;
231 if((now.tv_sec - entry->tv.tv_sec) 241 if ((now.tv_sec - entry->tv.tv_sec)
232 > entry->ctrl_info.holding_time){ 242 > entry->ctrl_info.holding_time) {
233 dprintk("mpoa: mpoa_caches.c: holding time expired, ip = %pI4\n", 243 dprintk("holding time expired, ip = %pI4\n",
234 &entry->ctrl_info.in_dst_ip); 244 &entry->ctrl_info.in_dst_ip);
235 client->in_ops->remove_entry(entry, client); 245 client->in_ops->remove_entry(entry, client);
236 } 246 }
@@ -250,33 +260,38 @@ static void check_resolving_entries(struct mpoa_client *client)
250 struct timeval now; 260 struct timeval now;
251 struct k_message msg; 261 struct k_message msg;
252 262
253 do_gettimeofday( &now ); 263 do_gettimeofday(&now);
254 264
255 read_lock_bh(&client->ingress_lock); 265 read_lock_bh(&client->ingress_lock);
256 entry = client->in_cache; 266 entry = client->in_cache;
257 while( entry != NULL ){ 267 while (entry != NULL) {
258 if(entry->entry_state == INGRESS_RESOLVING){ 268 if (entry->entry_state == INGRESS_RESOLVING) {
259 if(now.tv_sec - entry->hold_down.tv_sec < client->parameters.mpc_p6){ 269 if ((now.tv_sec - entry->hold_down.tv_sec) <
260 entry = entry->next; /* Entry in hold down */ 270 client->parameters.mpc_p6) {
271 entry = entry->next; /* Entry in hold down */
261 continue; 272 continue;
262 } 273 }
263 if( (now.tv_sec - entry->reply_wait.tv_sec) > 274 if ((now.tv_sec - entry->reply_wait.tv_sec) >
264 entry->retry_time ){ 275 entry->retry_time) {
265 entry->retry_time = MPC_C1*( entry->retry_time ); 276 entry->retry_time = MPC_C1 * (entry->retry_time);
266 if(entry->retry_time > client->parameters.mpc_p5){ 277 /*
267 /* Retry time maximum exceeded, put entry in hold down. */ 278 * Retry time maximum exceeded,
279 * put entry in hold down.
280 */
281 if (entry->retry_time > client->parameters.mpc_p5) {
268 do_gettimeofday(&(entry->hold_down)); 282 do_gettimeofday(&(entry->hold_down));
269 entry->retry_time = client->parameters.mpc_p4; 283 entry->retry_time = client->parameters.mpc_p4;
270 entry = entry->next; 284 entry = entry->next;
271 continue; 285 continue;
272 } 286 }
273 /* Ask daemon to send a resolution request. */ 287 /* Ask daemon to send a resolution request. */
274 memset(&(entry->hold_down),0,sizeof(struct timeval)); 288 memset(&(entry->hold_down), 0, sizeof(struct timeval));
275 msg.type = SND_MPOA_RES_RTRY; 289 msg.type = SND_MPOA_RES_RTRY;
276 memcpy(msg.MPS_ctrl, client->mps_ctrl_addr, ATM_ESA_LEN); 290 memcpy(msg.MPS_ctrl, client->mps_ctrl_addr, ATM_ESA_LEN);
277 msg.content.in_info = entry->ctrl_info; 291 msg.content.in_info = entry->ctrl_info;
278 qos = atm_mpoa_search_qos(entry->ctrl_info.in_dst_ip); 292 qos = atm_mpoa_search_qos(entry->ctrl_info.in_dst_ip);
279 if (qos != NULL) msg.qos = qos->qos; 293 if (qos != NULL)
294 msg.qos = qos->qos;
280 msg_to_mpoad(&msg, client); 295 msg_to_mpoad(&msg, client);
281 do_gettimeofday(&(entry->reply_wait)); 296 do_gettimeofday(&(entry->reply_wait));
282 } 297 }
@@ -292,16 +307,17 @@ static void refresh_entries(struct mpoa_client *client)
292 struct timeval now; 307 struct timeval now;
293 struct in_cache_entry *entry = client->in_cache; 308 struct in_cache_entry *entry = client->in_cache;
294 309
295 ddprintk("mpoa: mpoa_caches.c: refresh_entries\n"); 310 ddprintk("refresh_entries\n");
296 do_gettimeofday(&now); 311 do_gettimeofday(&now);
297 312
298 read_lock_bh(&client->ingress_lock); 313 read_lock_bh(&client->ingress_lock);
299 while( entry != NULL ){ 314 while (entry != NULL) {
300 if( entry->entry_state == INGRESS_RESOLVED ){ 315 if (entry->entry_state == INGRESS_RESOLVED) {
301 if(!(entry->refresh_time)) 316 if (!(entry->refresh_time))
302 entry->refresh_time = (2*(entry->ctrl_info.holding_time))/3; 317 entry->refresh_time = (2 * (entry->ctrl_info.holding_time))/3;
303 if( (now.tv_sec - entry->reply_wait.tv_sec) > entry->refresh_time ){ 318 if ((now.tv_sec - entry->reply_wait.tv_sec) >
304 dprintk("mpoa: mpoa_caches.c: refreshing an entry.\n"); 319 entry->refresh_time) {
320 dprintk("refreshing an entry.\n");
305 entry->entry_state = INGRESS_REFRESHING; 321 entry->entry_state = INGRESS_REFRESHING;
306 322
307 } 323 }
@@ -314,21 +330,22 @@ static void refresh_entries(struct mpoa_client *client)
314static void in_destroy_cache(struct mpoa_client *mpc) 330static void in_destroy_cache(struct mpoa_client *mpc)
315{ 331{
316 write_lock_irq(&mpc->ingress_lock); 332 write_lock_irq(&mpc->ingress_lock);
317 while(mpc->in_cache != NULL) 333 while (mpc->in_cache != NULL)
318 mpc->in_ops->remove_entry(mpc->in_cache, mpc); 334 mpc->in_ops->remove_entry(mpc->in_cache, mpc);
319 write_unlock_irq(&mpc->ingress_lock); 335 write_unlock_irq(&mpc->ingress_lock);
320 336
321 return; 337 return;
322} 338}
323 339
324static eg_cache_entry *eg_cache_get_by_cache_id(__be32 cache_id, struct mpoa_client *mpc) 340static eg_cache_entry *eg_cache_get_by_cache_id(__be32 cache_id,
341 struct mpoa_client *mpc)
325{ 342{
326 eg_cache_entry *entry; 343 eg_cache_entry *entry;
327 344
328 read_lock_irq(&mpc->egress_lock); 345 read_lock_irq(&mpc->egress_lock);
329 entry = mpc->eg_cache; 346 entry = mpc->eg_cache;
330 while(entry != NULL){ 347 while (entry != NULL) {
331 if(entry->ctrl_info.cache_id == cache_id){ 348 if (entry->ctrl_info.cache_id == cache_id) {
332 atomic_inc(&entry->use); 349 atomic_inc(&entry->use);
333 read_unlock_irq(&mpc->egress_lock); 350 read_unlock_irq(&mpc->egress_lock);
334 return entry; 351 return entry;
@@ -348,7 +365,7 @@ static eg_cache_entry *eg_cache_get_by_tag(__be32 tag, struct mpoa_client *mpc)
348 365
349 read_lock_irqsave(&mpc->egress_lock, flags); 366 read_lock_irqsave(&mpc->egress_lock, flags);
350 entry = mpc->eg_cache; 367 entry = mpc->eg_cache;
351 while (entry != NULL){ 368 while (entry != NULL) {
352 if (entry->ctrl_info.tag == tag) { 369 if (entry->ctrl_info.tag == tag) {
353 atomic_inc(&entry->use); 370 atomic_inc(&entry->use);
354 read_unlock_irqrestore(&mpc->egress_lock, flags); 371 read_unlock_irqrestore(&mpc->egress_lock, flags);
@@ -362,14 +379,15 @@ static eg_cache_entry *eg_cache_get_by_tag(__be32 tag, struct mpoa_client *mpc)
362} 379}
363 380
364/* This can be called from any context since it saves CPU flags */ 381/* This can be called from any context since it saves CPU flags */
365static eg_cache_entry *eg_cache_get_by_vcc(struct atm_vcc *vcc, struct mpoa_client *mpc) 382static eg_cache_entry *eg_cache_get_by_vcc(struct atm_vcc *vcc,
383 struct mpoa_client *mpc)
366{ 384{
367 unsigned long flags; 385 unsigned long flags;
368 eg_cache_entry *entry; 386 eg_cache_entry *entry;
369 387
370 read_lock_irqsave(&mpc->egress_lock, flags); 388 read_lock_irqsave(&mpc->egress_lock, flags);
371 entry = mpc->eg_cache; 389 entry = mpc->eg_cache;
372 while (entry != NULL){ 390 while (entry != NULL) {
373 if (entry->shortcut == vcc) { 391 if (entry->shortcut == vcc) {
374 atomic_inc(&entry->use); 392 atomic_inc(&entry->use);
375 read_unlock_irqrestore(&mpc->egress_lock, flags); 393 read_unlock_irqrestore(&mpc->egress_lock, flags);
@@ -382,14 +400,15 @@ static eg_cache_entry *eg_cache_get_by_vcc(struct atm_vcc *vcc, struct mpoa_clie
382 return NULL; 400 return NULL;
383} 401}
384 402
385static eg_cache_entry *eg_cache_get_by_src_ip(__be32 ipaddr, struct mpoa_client *mpc) 403static eg_cache_entry *eg_cache_get_by_src_ip(__be32 ipaddr,
404 struct mpoa_client *mpc)
386{ 405{
387 eg_cache_entry *entry; 406 eg_cache_entry *entry;
388 407
389 read_lock_irq(&mpc->egress_lock); 408 read_lock_irq(&mpc->egress_lock);
390 entry = mpc->eg_cache; 409 entry = mpc->eg_cache;
391 while(entry != NULL){ 410 while (entry != NULL) {
392 if(entry->latest_ip_addr == ipaddr) { 411 if (entry->latest_ip_addr == ipaddr) {
393 atomic_inc(&entry->use); 412 atomic_inc(&entry->use);
394 read_unlock_irq(&mpc->egress_lock); 413 read_unlock_irq(&mpc->egress_lock);
395 return entry; 414 return entry;
@@ -421,7 +440,7 @@ static void eg_cache_remove_entry(eg_cache_entry *entry,
421 struct k_message msg; 440 struct k_message msg;
422 441
423 vcc = entry->shortcut; 442 vcc = entry->shortcut;
424 dprintk("mpoa: mpoa_caches.c: removing an egress entry.\n"); 443 dprintk("removing an egress entry.\n");
425 if (entry->prev != NULL) 444 if (entry->prev != NULL)
426 entry->prev->next = entry->next; 445 entry->prev->next = entry->next;
427 else 446 else
@@ -429,9 +448,9 @@ static void eg_cache_remove_entry(eg_cache_entry *entry,
429 if (entry->next != NULL) 448 if (entry->next != NULL)
430 entry->next->prev = entry->prev; 449 entry->next->prev = entry->prev;
431 client->eg_ops->put(entry); 450 client->eg_ops->put(entry);
432 if(client->in_cache == NULL && client->eg_cache == NULL){ 451 if (client->in_cache == NULL && client->eg_cache == NULL) {
433 msg.type = STOP_KEEP_ALIVE_SM; 452 msg.type = STOP_KEEP_ALIVE_SM;
434 msg_to_mpoad(&msg,client); 453 msg_to_mpoad(&msg, client);
435 } 454 }
436 455
437 /* Check if the ingress side still uses this VCC */ 456 /* Check if the ingress side still uses this VCC */
@@ -447,20 +466,21 @@ static void eg_cache_remove_entry(eg_cache_entry *entry,
447 return; 466 return;
448} 467}
449 468
450static eg_cache_entry *eg_cache_add_entry(struct k_message *msg, struct mpoa_client *client) 469static eg_cache_entry *eg_cache_add_entry(struct k_message *msg,
470 struct mpoa_client *client)
451{ 471{
452 eg_cache_entry *entry = kzalloc(sizeof(eg_cache_entry), GFP_KERNEL); 472 eg_cache_entry *entry = kzalloc(sizeof(eg_cache_entry), GFP_KERNEL);
453 473
454 if (entry == NULL) { 474 if (entry == NULL) {
455 printk("mpoa: mpoa_caches.c: new_eg_cache_entry: out of memory\n"); 475 pr_info("out of memory\n");
456 return NULL; 476 return NULL;
457 } 477 }
458 478
459 dprintk("mpoa: mpoa_caches.c: adding an egress entry, ip = %pI4, this should be our IP\n", 479 dprintk("adding an egress entry, ip = %pI4, this should be our IP\n",
460 &msg->content.eg_info.eg_dst_ip); 480 &msg->content.eg_info.eg_dst_ip);
461 481
462 atomic_set(&entry->use, 1); 482 atomic_set(&entry->use, 1);
463 dprintk("mpoa: mpoa_caches.c: new_eg_cache_entry: about to lock\n"); 483 dprintk("new_eg_cache_entry: about to lock\n");
464 write_lock_irq(&client->egress_lock); 484 write_lock_irq(&client->egress_lock);
465 entry->next = client->eg_cache; 485 entry->next = client->eg_cache;
466 entry->prev = NULL; 486 entry->prev = NULL;
@@ -472,18 +492,18 @@ static eg_cache_entry *eg_cache_add_entry(struct k_message *msg, struct mpoa_cli
472 entry->ctrl_info = msg->content.eg_info; 492 entry->ctrl_info = msg->content.eg_info;
473 do_gettimeofday(&(entry->tv)); 493 do_gettimeofday(&(entry->tv));
474 entry->entry_state = EGRESS_RESOLVED; 494 entry->entry_state = EGRESS_RESOLVED;
475 dprintk("mpoa: mpoa_caches.c: new_eg_cache_entry cache_id %lu\n", ntohl(entry->ctrl_info.cache_id)); 495 dprintk("new_eg_cache_entry cache_id %u\n",
476 dprintk("mpoa: mpoa_caches.c: mps_ip = %pI4\n", 496 ntohl(entry->ctrl_info.cache_id));
477 &entry->ctrl_info.mps_ip); 497 dprintk("mps_ip = %pI4\n", &entry->ctrl_info.mps_ip);
478 atomic_inc(&entry->use); 498 atomic_inc(&entry->use);
479 499
480 write_unlock_irq(&client->egress_lock); 500 write_unlock_irq(&client->egress_lock);
481 dprintk("mpoa: mpoa_caches.c: new_eg_cache_entry: unlocked\n"); 501 dprintk("new_eg_cache_entry: unlocked\n");
482 502
483 return entry; 503 return entry;
484} 504}
485 505
486static void update_eg_cache_entry(eg_cache_entry * entry, uint16_t holding_time) 506static void update_eg_cache_entry(eg_cache_entry *entry, uint16_t holding_time)
487{ 507{
488 do_gettimeofday(&(entry->tv)); 508 do_gettimeofday(&(entry->tv));
489 entry->entry_state = EGRESS_RESOLVED; 509 entry->entry_state = EGRESS_RESOLVED;
@@ -502,13 +522,14 @@ static void clear_expired(struct mpoa_client *client)
502 522
503 write_lock_irq(&client->egress_lock); 523 write_lock_irq(&client->egress_lock);
504 entry = client->eg_cache; 524 entry = client->eg_cache;
505 while(entry != NULL){ 525 while (entry != NULL) {
506 next_entry = entry->next; 526 next_entry = entry->next;
507 if((now.tv_sec - entry->tv.tv_sec) 527 if ((now.tv_sec - entry->tv.tv_sec)
508 > entry->ctrl_info.holding_time){ 528 > entry->ctrl_info.holding_time) {
509 msg.type = SND_EGRESS_PURGE; 529 msg.type = SND_EGRESS_PURGE;
510 msg.content.eg_info = entry->ctrl_info; 530 msg.content.eg_info = entry->ctrl_info;
511 dprintk("mpoa: mpoa_caches.c: egress_cache: holding time expired, cache_id = %lu.\n",ntohl(entry->ctrl_info.cache_id)); 531 dprintk("egress_cache: holding time expired, cache_id = %u.\n",
532 ntohl(entry->ctrl_info.cache_id));
512 msg_to_mpoad(&msg, client); 533 msg_to_mpoad(&msg, client);
513 client->eg_ops->remove_entry(entry, client); 534 client->eg_ops->remove_entry(entry, client);
514 } 535 }
@@ -522,7 +543,7 @@ static void clear_expired(struct mpoa_client *client)
522static void eg_destroy_cache(struct mpoa_client *mpc) 543static void eg_destroy_cache(struct mpoa_client *mpc)
523{ 544{
524 write_lock_irq(&mpc->egress_lock); 545 write_lock_irq(&mpc->egress_lock);
525 while(mpc->eg_cache != NULL) 546 while (mpc->eg_cache != NULL)
526 mpc->eg_ops->remove_entry(mpc->eg_cache, mpc); 547 mpc->eg_ops->remove_entry(mpc->eg_cache, mpc);
527 write_unlock_irq(&mpc->egress_lock); 548 write_unlock_irq(&mpc->egress_lock);
528 549
@@ -530,7 +551,6 @@ static void eg_destroy_cache(struct mpoa_client *mpc)
530} 551}
531 552
532 553
533
534static struct in_cache_ops ingress_ops = { 554static struct in_cache_ops ingress_ops = {
535 in_cache_add_entry, /* add_entry */ 555 in_cache_add_entry, /* add_entry */
536 in_cache_get, /* get */ 556 in_cache_get, /* get */
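The dprintk()/ddprintk() rework at the top of mpoa_caches.c above is more than a whitespace cleanup: the disabled variant goes from an empty macro to a do { if (0) printk(...); } while (0) body, so the compiler still type-checks the format string and its arguments when debugging is compiled out, while the if (0) lets it drop the call entirely. A standalone sketch of the idiom, with cache_dbg() as an invented name purely for illustration:

#include <linux/compiler.h>
#include <linux/printk.h>

#ifdef DEBUG_MPOA_CACHES
#define cache_dbg(format, args...) \
	printk(KERN_DEBUG "mpoa:%s: " format, __FILE__, ##args)
#else
#define cache_dbg(format, args...) \
	do { \
		if (0) \
			printk(KERN_DEBUG "mpoa:%s: " format, __FILE__, ##args); \
	} while (0)
#endif

static void __maybe_unused cache_dbg_demo(unsigned int cache_id)
{
	/* Checked by the compiler in both configurations; the old empty
	 * #define dprintk(format, args...) would hide a mismatched
	 * format specifier here until debugging was switched on. */
	cache_dbg("cache_id %u\n", cache_id);
}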
diff --git a/net/atm/mpoa_proc.c b/net/atm/mpoa_proc.c
index 1a0f5ccea9c..b9bdb98427e 100644
--- a/net/atm/mpoa_proc.c
+++ b/net/atm/mpoa_proc.c
@@ -1,3 +1,4 @@
1#define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__
1 2
2#ifdef CONFIG_PROC_FS 3#ifdef CONFIG_PROC_FS
3#include <linux/errno.h> 4#include <linux/errno.h>
@@ -8,7 +9,7 @@
8#include <linux/proc_fs.h> 9#include <linux/proc_fs.h>
9#include <linux/time.h> 10#include <linux/time.h>
10#include <linux/seq_file.h> 11#include <linux/seq_file.h>
11#include <asm/uaccess.h> 12#include <linux/uaccess.h>
12#include <linux/atmmpc.h> 13#include <linux/atmmpc.h>
13#include <linux/atm.h> 14#include <linux/atm.h>
14#include "mpc.h" 15#include "mpc.h"
@@ -20,9 +21,23 @@
20 */ 21 */
21 22
22#if 1 23#if 1
23#define dprintk printk /* debug */ 24#define dprintk(format, args...) \
25 printk(KERN_DEBUG "mpoa:%s: " format, __FILE__, ##args) /* debug */
24#else 26#else
25#define dprintk(format,args...) 27#define dprintk(format, args...) \
28 do { if (0) \
29 printk(KERN_DEBUG "mpoa:%s: " format, __FILE__, ##args);\
30 } while (0)
31#endif
32
33#if 0
34#define ddprintk(format, args...) \
35 printk(KERN_DEBUG "mpoa:%s: " format, __FILE__, ##args) /* debug */
36#else
37#define ddprintk(format, args...) \
38 do { if (0) \
39 printk(KERN_DEBUG "mpoa:%s: " format, __FILE__, ##args);\
40 } while (0)
26#endif 41#endif
27 42
28#define STAT_FILE_NAME "mpc" /* Our statistic file's name */ 43#define STAT_FILE_NAME "mpc" /* Our statistic file's name */
@@ -51,42 +66,37 @@ static const struct file_operations mpc_file_operations = {
51/* 66/*
52 * Returns the state of an ingress cache entry as a string 67 * Returns the state of an ingress cache entry as a string
53 */ 68 */
54static const char *ingress_state_string(int state){ 69static const char *ingress_state_string(int state)
55 switch(state) { 70{
71 switch (state) {
56 case INGRESS_RESOLVING: 72 case INGRESS_RESOLVING:
57 return "resolving "; 73 return "resolving ";
58 break;
59 case INGRESS_RESOLVED: 74 case INGRESS_RESOLVED:
60 return "resolved "; 75 return "resolved ";
61 break;
62 case INGRESS_INVALID: 76 case INGRESS_INVALID:
63 return "invalid "; 77 return "invalid ";
64 break;
65 case INGRESS_REFRESHING: 78 case INGRESS_REFRESHING:
66 return "refreshing "; 79 return "refreshing ";
67 break;
68 default:
69 return "";
70 } 80 }
81
82 return "";
71} 83}
72 84
73/* 85/*
74 * Returns the state of an egress cache entry as a string 86 * Returns the state of an egress cache entry as a string
75 */ 87 */
76static const char *egress_state_string(int state){ 88static const char *egress_state_string(int state)
77 switch(state) { 89{
90 switch (state) {
78 case EGRESS_RESOLVED: 91 case EGRESS_RESOLVED:
79 return "resolved "; 92 return "resolved ";
80 break;
81 case EGRESS_PURGE: 93 case EGRESS_PURGE:
82 return "purge "; 94 return "purge ";
83 break;
84 case EGRESS_INVALID: 95 case EGRESS_INVALID:
85 return "invalid "; 96 return "invalid ";
86 break;
87 default:
88 return "";
89 } 97 }
98
99 return "";
90} 100}
91 101
92/* 102/*
@@ -123,7 +133,6 @@ static void mpc_stop(struct seq_file *m, void *v)
123static int mpc_show(struct seq_file *m, void *v) 133static int mpc_show(struct seq_file *m, void *v)
124{ 134{
125 struct mpoa_client *mpc = v; 135 struct mpoa_client *mpc = v;
126 unsigned char *temp;
127 int i; 136 int i;
128 in_cache_entry *in_entry; 137 in_cache_entry *in_entry;
129 eg_cache_entry *eg_entry; 138 eg_cache_entry *eg_entry;
@@ -140,15 +149,17 @@ static int mpc_show(struct seq_file *m, void *v)
140 do_gettimeofday(&now); 149 do_gettimeofday(&now);
141 150
142 for (in_entry = mpc->in_cache; in_entry; in_entry = in_entry->next) { 151 for (in_entry = mpc->in_cache; in_entry; in_entry = in_entry->next) {
143 temp = (unsigned char *)&in_entry->ctrl_info.in_dst_ip; 152 sprintf(ip_string, "%pI4", &in_entry->ctrl_info.in_dst_ip);
144 sprintf(ip_string,"%d.%d.%d.%d", temp[0], temp[1], temp[2], temp[3]);
145 seq_printf(m, "%-16s%s%-14lu%-12u", 153 seq_printf(m, "%-16s%s%-14lu%-12u",
146 ip_string, 154 ip_string,
147 ingress_state_string(in_entry->entry_state), 155 ingress_state_string(in_entry->entry_state),
148 in_entry->ctrl_info.holding_time-(now.tv_sec-in_entry->tv.tv_sec), 156 in_entry->ctrl_info.holding_time -
149 in_entry->packets_fwded); 157 (now.tv_sec-in_entry->tv.tv_sec),
158 in_entry->packets_fwded);
150 if (in_entry->shortcut) 159 if (in_entry->shortcut)
151 seq_printf(m, " %-3d %-3d",in_entry->shortcut->vpi,in_entry->shortcut->vci); 160 seq_printf(m, " %-3d %-3d",
161 in_entry->shortcut->vpi,
162 in_entry->shortcut->vci);
152 seq_printf(m, "\n"); 163 seq_printf(m, "\n");
153 } 164 }
154 165
@@ -156,21 +167,23 @@ static int mpc_show(struct seq_file *m, void *v)
156 seq_printf(m, "Egress Entries:\nIngress MPC ATM addr\nCache-id State Holding time Packets recvd Latest IP addr VPI VCI\n"); 167 seq_printf(m, "Egress Entries:\nIngress MPC ATM addr\nCache-id State Holding time Packets recvd Latest IP addr VPI VCI\n");
157 for (eg_entry = mpc->eg_cache; eg_entry; eg_entry = eg_entry->next) { 168 for (eg_entry = mpc->eg_cache; eg_entry; eg_entry = eg_entry->next) {
158 unsigned char *p = eg_entry->ctrl_info.in_MPC_data_ATM_addr; 169 unsigned char *p = eg_entry->ctrl_info.in_MPC_data_ATM_addr;
159 for(i = 0; i < ATM_ESA_LEN; i++) 170 for (i = 0; i < ATM_ESA_LEN; i++)
160 seq_printf(m, "%02x", p[i]); 171 seq_printf(m, "%02x", p[i]);
161 seq_printf(m, "\n%-16lu%s%-14lu%-15u", 172 seq_printf(m, "\n%-16lu%s%-14lu%-15u",
162 (unsigned long)ntohl(eg_entry->ctrl_info.cache_id), 173 (unsigned long)ntohl(eg_entry->ctrl_info.cache_id),
163 egress_state_string(eg_entry->entry_state), 174 egress_state_string(eg_entry->entry_state),
164 (eg_entry->ctrl_info.holding_time-(now.tv_sec-eg_entry->tv.tv_sec)), 175 (eg_entry->ctrl_info.holding_time -
176 (now.tv_sec-eg_entry->tv.tv_sec)),
165 eg_entry->packets_rcvd); 177 eg_entry->packets_rcvd);
166 178
167 /* latest IP address */ 179 /* latest IP address */
168 temp = (unsigned char *)&eg_entry->latest_ip_addr; 180 sprintf(ip_string, "%pI4", &eg_entry->latest_ip_addr);
169 sprintf(ip_string, "%d.%d.%d.%d", temp[0], temp[1], temp[2], temp[3]);
170 seq_printf(m, "%-16s", ip_string); 181 seq_printf(m, "%-16s", ip_string);
171 182
172 if (eg_entry->shortcut) 183 if (eg_entry->shortcut)
173 seq_printf(m, " %-3d %-3d",eg_entry->shortcut->vpi,eg_entry->shortcut->vci); 184 seq_printf(m, " %-3d %-3d",
185 eg_entry->shortcut->vpi,
186 eg_entry->shortcut->vci);
174 seq_printf(m, "\n"); 187 seq_printf(m, "\n");
175 } 188 }
176 seq_printf(m, "\n"); 189 seq_printf(m, "\n");
@@ -258,12 +271,9 @@ static int parse_qos(const char *buff)
258 qos.rxtp.max_pcr = rx_pcr; 271 qos.rxtp.max_pcr = rx_pcr;
259 qos.rxtp.max_sdu = rx_sdu; 272 qos.rxtp.max_sdu = rx_sdu;
260 qos.aal = ATM_AAL5; 273 qos.aal = ATM_AAL5;
261 dprintk("mpoa: mpoa_proc.c: parse_qos(): setting qos paramameters to tx=%d,%d rx=%d,%d\n", 274 dprintk("parse_qos(): setting qos paramameters to tx=%d,%d rx=%d,%d\n",
262 qos.txtp.max_pcr, 275 qos.txtp.max_pcr, qos.txtp.max_sdu,
263 qos.txtp.max_sdu, 276 qos.rxtp.max_pcr, qos.rxtp.max_sdu);
264 qos.rxtp.max_pcr,
265 qos.rxtp.max_sdu
266 );
267 277
268 atm_mpoa_add_qos(ipaddr, &qos); 278 atm_mpoa_add_qos(ipaddr, &qos);
269 return 1; 279 return 1;
@@ -278,7 +288,7 @@ int mpc_proc_init(void)
278 288
279 p = proc_create(STAT_FILE_NAME, 0, atm_proc_root, &mpc_file_operations); 289 p = proc_create(STAT_FILE_NAME, 0, atm_proc_root, &mpc_file_operations);
280 if (!p) { 290 if (!p) {
281 printk(KERN_ERR "Unable to initialize /proc/atm/%s\n", STAT_FILE_NAME); 291 pr_err("Unable to initialize /proc/atm/%s\n", STAT_FILE_NAME);
282 return -ENOMEM; 292 return -ENOMEM;
283 } 293 }
284 return 0; 294 return 0;
@@ -289,10 +299,9 @@ int mpc_proc_init(void)
289 */ 299 */
290void mpc_proc_clean(void) 300void mpc_proc_clean(void)
291{ 301{
292 remove_proc_entry(STAT_FILE_NAME,atm_proc_root); 302 remove_proc_entry(STAT_FILE_NAME, atm_proc_root);
293} 303}
294 304
295
296#endif /* CONFIG_PROC_FS */ 305#endif /* CONFIG_PROC_FS */
297 306
298 307
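The mpc_show() changes above also trade the temp[0]..temp[3] byte juggling for the kernel's %pI4 printf extension, which formats a network-order IPv4 address straight from a pointer to it. A small sketch under the same assumptions as the patch (show_addr() is a made-up helper, not part of mpoa_proc.c):

#include <linux/compiler.h>
#include <linux/kernel.h>
#include <linux/printk.h>
#include <linux/types.h>

static void __maybe_unused show_addr(__be32 in_dst_ip)
{
	char ip_string[16];	/* "255.255.255.255" plus NUL */

	/* Previously: unsigned char *temp = (unsigned char *)&in_dst_ip;
	 *             sprintf(ip_string, "%d.%d.%d.%d",
	 *                     temp[0], temp[1], temp[2], temp[3]);
	 */
	sprintf(ip_string, "%pI4", &in_dst_ip);
	pr_debug("in_dst_ip = %s\n", ip_string);
}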
diff --git a/net/atm/pppoatm.c b/net/atm/pppoatm.c
index 0af84cd4f65..400839273c6 100644
--- a/net/atm/pppoatm.c
+++ b/net/atm/pppoatm.c
@@ -33,6 +33,8 @@
33 * These hooks are not yet available in ppp_generic 33 * These hooks are not yet available in ppp_generic
34 */ 34 */
35 35
36#define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__
37
36#include <linux/module.h> 38#include <linux/module.h>
37#include <linux/init.h> 39#include <linux/init.h>
38#include <linux/skbuff.h> 40#include <linux/skbuff.h>
@@ -132,7 +134,7 @@ static void pppoatm_unassign_vcc(struct atm_vcc *atmvcc)
132static void pppoatm_push(struct atm_vcc *atmvcc, struct sk_buff *skb) 134static void pppoatm_push(struct atm_vcc *atmvcc, struct sk_buff *skb)
133{ 135{
134 struct pppoatm_vcc *pvcc = atmvcc_to_pvcc(atmvcc); 136 struct pppoatm_vcc *pvcc = atmvcc_to_pvcc(atmvcc);
135 pr_debug("pppoatm push\n"); 137 pr_debug("\n");
136 if (skb == NULL) { /* VCC was closed */ 138 if (skb == NULL) { /* VCC was closed */
137 pr_debug("removing ATMPPP VCC %p\n", pvcc); 139 pr_debug("removing ATMPPP VCC %p\n", pvcc);
138 pppoatm_unassign_vcc(atmvcc); 140 pppoatm_unassign_vcc(atmvcc);
@@ -165,17 +167,17 @@ static void pppoatm_push(struct atm_vcc *atmvcc, struct sk_buff *skb)
165 pvcc->chan.mtu += LLC_LEN; 167 pvcc->chan.mtu += LLC_LEN;
166 break; 168 break;
167 } 169 }
168 pr_debug("Couldn't autodetect yet " 170 pr_debug("Couldn't autodetect yet (skb: %02X %02X %02X %02X %02X %02X)\n",
169 "(skb: %02X %02X %02X %02X %02X %02X)\n", 171 skb->data[0], skb->data[1], skb->data[2],
170 skb->data[0], skb->data[1], skb->data[2], 172 skb->data[3], skb->data[4], skb->data[5]);
171 skb->data[3], skb->data[4], skb->data[5]);
172 goto error; 173 goto error;
173 case e_vc: 174 case e_vc:
174 break; 175 break;
175 } 176 }
176 ppp_input(&pvcc->chan, skb); 177 ppp_input(&pvcc->chan, skb);
177 return; 178 return;
178 error: 179
180error:
179 kfree_skb(skb); 181 kfree_skb(skb);
180 ppp_input_error(&pvcc->chan, 0); 182 ppp_input_error(&pvcc->chan, 0);
181} 183}
@@ -194,7 +196,7 @@ static int pppoatm_send(struct ppp_channel *chan, struct sk_buff *skb)
194{ 196{
195 struct pppoatm_vcc *pvcc = chan_to_pvcc(chan); 197 struct pppoatm_vcc *pvcc = chan_to_pvcc(chan);
196 ATM_SKB(skb)->vcc = pvcc->atmvcc; 198 ATM_SKB(skb)->vcc = pvcc->atmvcc;
197 pr_debug("pppoatm_send (skb=0x%p, vcc=0x%p)\n", skb, pvcc->atmvcc); 199 pr_debug("(skb=0x%p, vcc=0x%p)\n", skb, pvcc->atmvcc);
198 if (skb->data[0] == '\0' && (pvcc->flags & SC_COMP_PROT)) 200 if (skb->data[0] == '\0' && (pvcc->flags & SC_COMP_PROT))
199 (void) skb_pull(skb, 1); 201 (void) skb_pull(skb, 1);
200 switch (pvcc->encaps) { /* LLC encapsulation needed */ 202 switch (pvcc->encaps) { /* LLC encapsulation needed */
@@ -208,7 +210,8 @@ static int pppoatm_send(struct ppp_channel *chan, struct sk_buff *skb)
208 goto nospace; 210 goto nospace;
209 } 211 }
210 kfree_skb(skb); 212 kfree_skb(skb);
211 if ((skb = n) == NULL) 213 skb = n;
214 if (skb == NULL)
212 return DROP_PACKET; 215 return DROP_PACKET;
213 } else if (!atm_may_send(pvcc->atmvcc, skb->truesize)) 216 } else if (!atm_may_send(pvcc->atmvcc, skb->truesize))
214 goto nospace; 217 goto nospace;
@@ -226,11 +229,11 @@ static int pppoatm_send(struct ppp_channel *chan, struct sk_buff *skb)
226 229
227 atomic_add(skb->truesize, &sk_atm(ATM_SKB(skb)->vcc)->sk_wmem_alloc); 230 atomic_add(skb->truesize, &sk_atm(ATM_SKB(skb)->vcc)->sk_wmem_alloc);
228 ATM_SKB(skb)->atm_options = ATM_SKB(skb)->vcc->atm_options; 231 ATM_SKB(skb)->atm_options = ATM_SKB(skb)->vcc->atm_options;
229 pr_debug("atm_skb(%p)->vcc(%p)->dev(%p)\n", skb, ATM_SKB(skb)->vcc, 232 pr_debug("atm_skb(%p)->vcc(%p)->dev(%p)\n",
230 ATM_SKB(skb)->vcc->dev); 233 skb, ATM_SKB(skb)->vcc, ATM_SKB(skb)->vcc->dev);
231 return ATM_SKB(skb)->vcc->send(ATM_SKB(skb)->vcc, skb) 234 return ATM_SKB(skb)->vcc->send(ATM_SKB(skb)->vcc, skb)
232 ? DROP_PACKET : 1; 235 ? DROP_PACKET : 1;
233 nospace: 236nospace:
234 /* 237 /*
235 * We don't have space to send this SKB now, but we might have 238 * We don't have space to send this SKB now, but we might have
236 * already applied SC_COMP_PROT compression, so may need to undo 239 * already applied SC_COMP_PROT compression, so may need to undo
@@ -289,7 +292,8 @@ static int pppoatm_assign_vcc(struct atm_vcc *atmvcc, void __user *arg)
289 (be.encaps == e_vc ? 0 : LLC_LEN); 292 (be.encaps == e_vc ? 0 : LLC_LEN);
290 pvcc->wakeup_tasklet = tasklet_proto; 293 pvcc->wakeup_tasklet = tasklet_proto;
291 pvcc->wakeup_tasklet.data = (unsigned long) &pvcc->chan; 294 pvcc->wakeup_tasklet.data = (unsigned long) &pvcc->chan;
292 if ((err = ppp_register_channel(&pvcc->chan)) != 0) { 295 err = ppp_register_channel(&pvcc->chan);
296 if (err != 0) {
293 kfree(pvcc); 297 kfree(pvcc);
294 return err; 298 return err;
295 } 299 }
diff --git a/net/atm/proc.c b/net/atm/proc.c
index ab8419a324b..476779d845e 100644
--- a/net/atm/proc.c
+++ b/net/atm/proc.c
@@ -24,15 +24,15 @@
24#include <linux/init.h> /* for __init */ 24#include <linux/init.h> /* for __init */
25#include <net/net_namespace.h> 25#include <net/net_namespace.h>
26#include <net/atmclip.h> 26#include <net/atmclip.h>
27#include <asm/uaccess.h> 27#include <linux/uaccess.h>
28#include <linux/param.h> /* for HZ */
28#include <asm/atomic.h> 29#include <asm/atomic.h>
29#include <asm/param.h> /* for HZ */
30#include "resources.h" 30#include "resources.h"
31#include "common.h" /* atm_proc_init prototype */ 31#include "common.h" /* atm_proc_init prototype */
32#include "signaling.h" /* to get sigd - ugly too */ 32#include "signaling.h" /* to get sigd - ugly too */
33 33
34static ssize_t proc_dev_atm_read(struct file *file,char __user *buf,size_t count, 34static ssize_t proc_dev_atm_read(struct file *file, char __user *buf,
35 loff_t *pos); 35 size_t count, loff_t *pos);
36 36
37static const struct file_operations proc_atm_dev_ops = { 37static const struct file_operations proc_atm_dev_ops = {
38 .owner = THIS_MODULE, 38 .owner = THIS_MODULE,
@@ -43,9 +43,9 @@ static void add_stats(struct seq_file *seq, const char *aal,
43 const struct k_atm_aal_stats *stats) 43 const struct k_atm_aal_stats *stats)
44{ 44{
45 seq_printf(seq, "%s ( %d %d %d %d %d )", aal, 45 seq_printf(seq, "%s ( %d %d %d %d %d )", aal,
46 atomic_read(&stats->tx),atomic_read(&stats->tx_err), 46 atomic_read(&stats->tx), atomic_read(&stats->tx_err),
47 atomic_read(&stats->rx),atomic_read(&stats->rx_err), 47 atomic_read(&stats->rx), atomic_read(&stats->rx_err),
48 atomic_read(&stats->rx_drop)); 48 atomic_read(&stats->rx_drop));
49} 49}
50 50
51static void atm_dev_info(struct seq_file *seq, const struct atm_dev *dev) 51static void atm_dev_info(struct seq_file *seq, const struct atm_dev *dev)
@@ -151,8 +151,8 @@ static void *vcc_seq_next(struct seq_file *seq, void *v, loff_t *pos)
151 151
152static void pvc_info(struct seq_file *seq, struct atm_vcc *vcc) 152static void pvc_info(struct seq_file *seq, struct atm_vcc *vcc)
153{ 153{
154 static const char *const class_name[] = 154 static const char *const class_name[] = {
155 {"off","UBR","CBR","VBR","ABR"}; 155 "off", "UBR", "CBR", "VBR", "ABR"};
156 static const char *const aal_name[] = { 156 static const char *const aal_name[] = {
157 "---", "1", "2", "3/4", /* 0- 3 */ 157 "---", "1", "2", "3/4", /* 0- 3 */
158 "???", "5", "???", "???", /* 4- 7 */ 158 "???", "5", "???", "???", /* 4- 7 */
@@ -160,11 +160,12 @@ static void pvc_info(struct seq_file *seq, struct atm_vcc *vcc)
160 "???", "0", "???", "???"}; /* 12-15 */ 160 "???", "0", "???", "???"}; /* 12-15 */
161 161
162 seq_printf(seq, "%3d %3d %5d %-3s %7d %-5s %7d %-6s", 162 seq_printf(seq, "%3d %3d %5d %-3s %7d %-5s %7d %-6s",
163 vcc->dev->number,vcc->vpi,vcc->vci, 163 vcc->dev->number, vcc->vpi, vcc->vci,
164 vcc->qos.aal >= ARRAY_SIZE(aal_name) ? "err" : 164 vcc->qos.aal >= ARRAY_SIZE(aal_name) ? "err" :
165 aal_name[vcc->qos.aal],vcc->qos.rxtp.min_pcr, 165 aal_name[vcc->qos.aal], vcc->qos.rxtp.min_pcr,
166 class_name[vcc->qos.rxtp.traffic_class],vcc->qos.txtp.min_pcr, 166 class_name[vcc->qos.rxtp.traffic_class],
167 class_name[vcc->qos.txtp.traffic_class]); 167 vcc->qos.txtp.min_pcr,
168 class_name[vcc->qos.txtp.traffic_class]);
168 if (test_bit(ATM_VF_IS_CLIP, &vcc->flags)) { 169 if (test_bit(ATM_VF_IS_CLIP, &vcc->flags)) {
169 struct clip_vcc *clip_vcc = CLIP_VCC(vcc); 170 struct clip_vcc *clip_vcc = CLIP_VCC(vcc);
170 struct net_device *dev; 171 struct net_device *dev;
@@ -195,19 +196,20 @@ static void vcc_info(struct seq_file *seq, struct atm_vcc *vcc)
195 seq_printf(seq, "%3d %3d %5d ", vcc->dev->number, vcc->vpi, 196 seq_printf(seq, "%3d %3d %5d ", vcc->dev->number, vcc->vpi,
196 vcc->vci); 197 vcc->vci);
197 switch (sk->sk_family) { 198 switch (sk->sk_family) {
198 case AF_ATMPVC: 199 case AF_ATMPVC:
199 seq_printf(seq, "PVC"); 200 seq_printf(seq, "PVC");
200 break; 201 break;
201 case AF_ATMSVC: 202 case AF_ATMSVC:
202 seq_printf(seq, "SVC"); 203 seq_printf(seq, "SVC");
203 break; 204 break;
204 default: 205 default:
205 seq_printf(seq, "%3d", sk->sk_family); 206 seq_printf(seq, "%3d", sk->sk_family);
206 } 207 }
207 seq_printf(seq, " %04lx %5d %7d/%7d %7d/%7d [%d]\n", vcc->flags, sk->sk_err, 208 seq_printf(seq, " %04lx %5d %7d/%7d %7d/%7d [%d]\n",
208 sk_wmem_alloc_get(sk), sk->sk_sndbuf, 209 vcc->flags, sk->sk_err,
209 sk_rmem_alloc_get(sk), sk->sk_rcvbuf, 210 sk_wmem_alloc_get(sk), sk->sk_sndbuf,
210 atomic_read(&sk->sk_refcnt)); 211 sk_rmem_alloc_get(sk), sk->sk_rcvbuf,
212 atomic_read(&sk->sk_refcnt));
211} 213}
212 214
213static void svc_info(struct seq_file *seq, struct atm_vcc *vcc) 215static void svc_info(struct seq_file *seq, struct atm_vcc *vcc)
@@ -376,32 +378,35 @@ static ssize_t proc_dev_atm_read(struct file *file, char __user *buf,
376 unsigned long page; 378 unsigned long page;
377 int length; 379 int length;
378 380
379 if (count == 0) return 0; 381 if (count == 0)
382 return 0;
380 page = get_zeroed_page(GFP_KERNEL); 383 page = get_zeroed_page(GFP_KERNEL);
381 if (!page) return -ENOMEM; 384 if (!page)
385 return -ENOMEM;
382 dev = PDE(file->f_path.dentry->d_inode)->data; 386 dev = PDE(file->f_path.dentry->d_inode)->data;
383 if (!dev->ops->proc_read) 387 if (!dev->ops->proc_read)
384 length = -EINVAL; 388 length = -EINVAL;
385 else { 389 else {
386 length = dev->ops->proc_read(dev,pos,(char *) page); 390 length = dev->ops->proc_read(dev, pos, (char *)page);
387 if (length > count) length = -EINVAL; 391 if (length > count)
392 length = -EINVAL;
388 } 393 }
389 if (length >= 0) { 394 if (length >= 0) {
390 if (copy_to_user(buf,(char *) page,length)) length = -EFAULT; 395 if (copy_to_user(buf, (char *)page, length))
396 length = -EFAULT;
391 (*pos)++; 397 (*pos)++;
392 } 398 }
393 free_page(page); 399 free_page(page);
394 return length; 400 return length;
395} 401}
396 402
397
398struct proc_dir_entry *atm_proc_root; 403struct proc_dir_entry *atm_proc_root;
399EXPORT_SYMBOL(atm_proc_root); 404EXPORT_SYMBOL(atm_proc_root);
400 405
401 406
402int atm_proc_dev_register(struct atm_dev *dev) 407int atm_proc_dev_register(struct atm_dev *dev)
403{ 408{
404 int digits,num; 409 int digits, num;
405 int error; 410 int error;
406 411
407 /* No proc info */ 412 /* No proc info */
@@ -410,26 +415,28 @@ int atm_proc_dev_register(struct atm_dev *dev)
410 415
411 error = -ENOMEM; 416 error = -ENOMEM;
412 digits = 0; 417 digits = 0;
413 for (num = dev->number; num; num /= 10) digits++; 418 for (num = dev->number; num; num /= 10)
414 if (!digits) digits++; 419 digits++;
420 if (!digits)
421 digits++;
415 422
416 dev->proc_name = kmalloc(strlen(dev->type) + digits + 2, GFP_KERNEL); 423 dev->proc_name = kmalloc(strlen(dev->type) + digits + 2, GFP_KERNEL);
417 if (!dev->proc_name) 424 if (!dev->proc_name)
418 goto err_out; 425 goto err_out;
419 sprintf(dev->proc_name,"%s:%d",dev->type, dev->number); 426 sprintf(dev->proc_name, "%s:%d", dev->type, dev->number);
420 427
421 dev->proc_entry = proc_create_data(dev->proc_name, 0, atm_proc_root, 428 dev->proc_entry = proc_create_data(dev->proc_name, 0, atm_proc_root,
422 &proc_atm_dev_ops, dev); 429 &proc_atm_dev_ops, dev);
423 if (!dev->proc_entry) 430 if (!dev->proc_entry)
424 goto err_free_name; 431 goto err_free_name;
425 return 0; 432 return 0;
433
426err_free_name: 434err_free_name:
427 kfree(dev->proc_name); 435 kfree(dev->proc_name);
428err_out: 436err_out:
429 return error; 437 return error;
430} 438}
431 439
432
433void atm_proc_dev_deregister(struct atm_dev *dev) 440void atm_proc_dev_deregister(struct atm_dev *dev)
434{ 441{
435 if (!dev->ops->proc_read) 442 if (!dev->ops->proc_read)
diff --git a/net/atm/pvc.c b/net/atm/pvc.c
index 8d74e62b0d7..437ee70c5e6 100644
--- a/net/atm/pvc.c
+++ b/net/atm/pvc.c
@@ -17,32 +17,35 @@
17#include "common.h" /* common for PVCs and SVCs */ 17#include "common.h" /* common for PVCs and SVCs */
18 18
19 19
20static int pvc_shutdown(struct socket *sock,int how) 20static int pvc_shutdown(struct socket *sock, int how)
21{ 21{
22 return 0; 22 return 0;
23} 23}
24 24
25 25static int pvc_bind(struct socket *sock, struct sockaddr *sockaddr,
26static int pvc_bind(struct socket *sock,struct sockaddr *sockaddr, 26 int sockaddr_len)
27 int sockaddr_len)
28{ 27{
29 struct sock *sk = sock->sk; 28 struct sock *sk = sock->sk;
30 struct sockaddr_atmpvc *addr; 29 struct sockaddr_atmpvc *addr;
31 struct atm_vcc *vcc; 30 struct atm_vcc *vcc;
32 int error; 31 int error;
33 32
34 if (sockaddr_len != sizeof(struct sockaddr_atmpvc)) return -EINVAL; 33 if (sockaddr_len != sizeof(struct sockaddr_atmpvc))
35 addr = (struct sockaddr_atmpvc *) sockaddr; 34 return -EINVAL;
36 if (addr->sap_family != AF_ATMPVC) return -EAFNOSUPPORT; 35 addr = (struct sockaddr_atmpvc *)sockaddr;
36 if (addr->sap_family != AF_ATMPVC)
37 return -EAFNOSUPPORT;
37 lock_sock(sk); 38 lock_sock(sk);
38 vcc = ATM_SD(sock); 39 vcc = ATM_SD(sock);
39 if (!test_bit(ATM_VF_HASQOS, &vcc->flags)) { 40 if (!test_bit(ATM_VF_HASQOS, &vcc->flags)) {
40 error = -EBADFD; 41 error = -EBADFD;
41 goto out; 42 goto out;
42 } 43 }
43 if (test_bit(ATM_VF_PARTIAL,&vcc->flags)) { 44 if (test_bit(ATM_VF_PARTIAL, &vcc->flags)) {
44 if (vcc->vpi != ATM_VPI_UNSPEC) addr->sap_addr.vpi = vcc->vpi; 45 if (vcc->vpi != ATM_VPI_UNSPEC)
45 if (vcc->vci != ATM_VCI_UNSPEC) addr->sap_addr.vci = vcc->vci; 46 addr->sap_addr.vpi = vcc->vpi;
47 if (vcc->vci != ATM_VCI_UNSPEC)
48 addr->sap_addr.vci = vcc->vci;
46 } 49 }
47 error = vcc_connect(sock, addr->sap_addr.itf, addr->sap_addr.vpi, 50 error = vcc_connect(sock, addr->sap_addr.itf, addr->sap_addr.vpi,
48 addr->sap_addr.vci); 51 addr->sap_addr.vci);
@@ -51,11 +54,10 @@ out:
51 return error; 54 return error;
52} 55}
53 56
54 57static int pvc_connect(struct socket *sock, struct sockaddr *sockaddr,
55static int pvc_connect(struct socket *sock,struct sockaddr *sockaddr, 58 int sockaddr_len, int flags)
56 int sockaddr_len,int flags)
57{ 59{
58 return pvc_bind(sock,sockaddr,sockaddr_len); 60 return pvc_bind(sock, sockaddr, sockaddr_len);
59} 61}
60 62
61static int pvc_setsockopt(struct socket *sock, int level, int optname, 63static int pvc_setsockopt(struct socket *sock, int level, int optname,
@@ -70,7 +72,6 @@ static int pvc_setsockopt(struct socket *sock, int level, int optname,
70 return error; 72 return error;
71} 73}
72 74
73
74static int pvc_getsockopt(struct socket *sock, int level, int optname, 75static int pvc_getsockopt(struct socket *sock, int level, int optname,
75 char __user *optval, int __user *optlen) 76 char __user *optval, int __user *optlen)
76{ 77{
@@ -83,16 +84,16 @@ static int pvc_getsockopt(struct socket *sock, int level, int optname,
83 return error; 84 return error;
84} 85}
85 86
86 87static int pvc_getname(struct socket *sock, struct sockaddr *sockaddr,
87static int pvc_getname(struct socket *sock,struct sockaddr *sockaddr, 88 int *sockaddr_len, int peer)
88 int *sockaddr_len,int peer)
89{ 89{
90 struct sockaddr_atmpvc *addr; 90 struct sockaddr_atmpvc *addr;
91 struct atm_vcc *vcc = ATM_SD(sock); 91 struct atm_vcc *vcc = ATM_SD(sock);
92 92
93 if (!vcc->dev || !test_bit(ATM_VF_ADDR,&vcc->flags)) return -ENOTCONN; 93 if (!vcc->dev || !test_bit(ATM_VF_ADDR, &vcc->flags))
94 return -ENOTCONN;
94 *sockaddr_len = sizeof(struct sockaddr_atmpvc); 95 *sockaddr_len = sizeof(struct sockaddr_atmpvc);
95 addr = (struct sockaddr_atmpvc *) sockaddr; 96 addr = (struct sockaddr_atmpvc *)sockaddr;
96 addr->sap_family = AF_ATMPVC; 97 addr->sap_family = AF_ATMPVC;
97 addr->sap_addr.itf = vcc->dev->number; 98 addr->sap_addr.itf = vcc->dev->number;
98 addr->sap_addr.vpi = vcc->vpi; 99 addr->sap_addr.vpi = vcc->vpi;
@@ -100,7 +101,6 @@ static int pvc_getname(struct socket *sock,struct sockaddr *sockaddr,
100 return 0; 101 return 0;
101} 102}
102 103
103
104static const struct proto_ops pvc_proto_ops = { 104static const struct proto_ops pvc_proto_ops = {
105 .family = PF_ATMPVC, 105 .family = PF_ATMPVC,
106 .owner = THIS_MODULE, 106 .owner = THIS_MODULE,
@@ -137,7 +137,6 @@ static int pvc_create(struct net *net, struct socket *sock, int protocol,
137 return vcc_create(net, sock, protocol, PF_ATMPVC); 137 return vcc_create(net, sock, protocol, PF_ATMPVC);
138} 138}
139 139
140
141static const struct net_proto_family pvc_family_ops = { 140static const struct net_proto_family pvc_family_ops = {
142 .family = PF_ATMPVC, 141 .family = PF_ATMPVC,
143 .create = pvc_create, 142 .create = pvc_create,
diff --git a/net/atm/raw.c b/net/atm/raw.c
index cbfcc71a17b..d0c4bd047dc 100644
--- a/net/atm/raw.c
+++ b/net/atm/raw.c
@@ -2,6 +2,7 @@
2 2
3/* Written 1995-2000 by Werner Almesberger, EPFL LRC/ICA */ 3/* Written 1995-2000 by Werner Almesberger, EPFL LRC/ICA */
4 4
5#define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__
5 6
6#include <linux/module.h> 7#include <linux/module.h>
7#include <linux/atmdev.h> 8#include <linux/atmdev.h>
@@ -17,7 +18,7 @@
17 * SKB == NULL indicates that the link is being closed 18 * SKB == NULL indicates that the link is being closed
18 */ 19 */
19 20
20static void atm_push_raw(struct atm_vcc *vcc,struct sk_buff *skb) 21static void atm_push_raw(struct atm_vcc *vcc, struct sk_buff *skb)
21{ 22{
22 if (skb) { 23 if (skb) {
23 struct sock *sk = sk_atm(vcc); 24 struct sock *sk = sk_atm(vcc);
@@ -27,36 +28,33 @@ static void atm_push_raw(struct atm_vcc *vcc,struct sk_buff *skb)
27 } 28 }
28} 29}
29 30
30 31static void atm_pop_raw(struct atm_vcc *vcc, struct sk_buff *skb)
31static void atm_pop_raw(struct atm_vcc *vcc,struct sk_buff *skb)
32{ 32{
33 struct sock *sk = sk_atm(vcc); 33 struct sock *sk = sk_atm(vcc);
34 34
35 pr_debug("APopR (%d) %d -= %d\n", vcc->vci, 35 pr_debug("(%d) %d -= %d\n",
36 sk_wmem_alloc_get(sk), skb->truesize); 36 vcc->vci, sk_wmem_alloc_get(sk), skb->truesize);
37 atomic_sub(skb->truesize, &sk->sk_wmem_alloc); 37 atomic_sub(skb->truesize, &sk->sk_wmem_alloc);
38 dev_kfree_skb_any(skb); 38 dev_kfree_skb_any(skb);
39 sk->sk_write_space(sk); 39 sk->sk_write_space(sk);
40} 40}
41 41
42 42static int atm_send_aal0(struct atm_vcc *vcc, struct sk_buff *skb)
43static int atm_send_aal0(struct atm_vcc *vcc,struct sk_buff *skb)
44{ 43{
45 /* 44 /*
46 * Note that if vpi/vci are _ANY or _UNSPEC the below will 45 * Note that if vpi/vci are _ANY or _UNSPEC the below will
47 * still work 46 * still work
48 */ 47 */
49 if (!capable(CAP_NET_ADMIN) && 48 if (!capable(CAP_NET_ADMIN) &&
50 (((u32 *) skb->data)[0] & (ATM_HDR_VPI_MASK | ATM_HDR_VCI_MASK)) != 49 (((u32 *)skb->data)[0] & (ATM_HDR_VPI_MASK | ATM_HDR_VCI_MASK)) !=
51 ((vcc->vpi << ATM_HDR_VPI_SHIFT) | (vcc->vci << ATM_HDR_VCI_SHIFT))) 50 ((vcc->vpi << ATM_HDR_VPI_SHIFT) |
52 { 51 (vcc->vci << ATM_HDR_VCI_SHIFT))) {
53 kfree_skb(skb); 52 kfree_skb(skb);
54 return -EADDRNOTAVAIL; 53 return -EADDRNOTAVAIL;
55 } 54 }
56 return vcc->dev->ops->send(vcc,skb); 55 return vcc->dev->ops->send(vcc, skb);
57} 56}
58 57
59
60int atm_init_aal0(struct atm_vcc *vcc) 58int atm_init_aal0(struct atm_vcc *vcc)
61{ 59{
62 vcc->push = atm_push_raw; 60 vcc->push = atm_push_raw;
@@ -66,7 +64,6 @@ int atm_init_aal0(struct atm_vcc *vcc)
66 return 0; 64 return 0;
67} 65}
68 66
69
70int atm_init_aal34(struct atm_vcc *vcc) 67int atm_init_aal34(struct atm_vcc *vcc)
71{ 68{
72 vcc->push = atm_push_raw; 69 vcc->push = atm_push_raw;
@@ -76,7 +73,6 @@ int atm_init_aal34(struct atm_vcc *vcc)
76 return 0; 73 return 0;
77} 74}
78 75
79
80int atm_init_aal5(struct atm_vcc *vcc) 76int atm_init_aal5(struct atm_vcc *vcc)
81{ 77{
82 vcc->push = atm_push_raw; 78 vcc->push = atm_push_raw;
@@ -85,6 +81,4 @@ int atm_init_aal5(struct atm_vcc *vcc)
85 vcc->send = vcc->dev->ops->send; 81 vcc->send = vcc->dev->ops->send;
86 return 0; 82 return 0;
87} 83}
88
89
90EXPORT_SYMBOL(atm_init_aal5); 84EXPORT_SYMBOL(atm_init_aal5);
diff --git a/net/atm/resources.c b/net/atm/resources.c
index 56b7322ff46..447ed89205d 100644
--- a/net/atm/resources.c
+++ b/net/atm/resources.c
@@ -7,6 +7,7 @@
7 * 2002/01 - don't free the whole struct sock on sk->destruct time, 7 * 2002/01 - don't free the whole struct sock on sk->destruct time,
8 * use the default destruct function initialized by sock_init_data */ 8 * use the default destruct function initialized by sock_init_data */
9 9
10#define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__
10 11
11#include <linux/ctype.h> 12#include <linux/ctype.h>
12#include <linux/string.h> 13#include <linux/string.h>
@@ -70,7 +71,7 @@ struct atm_dev *atm_dev_lookup(int number)
70 mutex_unlock(&atm_dev_mutex); 71 mutex_unlock(&atm_dev_mutex);
71 return dev; 72 return dev;
72} 73}
73 74EXPORT_SYMBOL(atm_dev_lookup);
74 75
75struct atm_dev *atm_dev_register(const char *type, const struct atmdev_ops *ops, 76struct atm_dev *atm_dev_register(const char *type, const struct atmdev_ops *ops,
76 int number, unsigned long *flags) 77 int number, unsigned long *flags)
@@ -79,13 +80,13 @@ struct atm_dev *atm_dev_register(const char *type, const struct atmdev_ops *ops,
79 80
80 dev = __alloc_atm_dev(type); 81 dev = __alloc_atm_dev(type);
81 if (!dev) { 82 if (!dev) {
82 printk(KERN_ERR "atm_dev_register: no space for dev %s\n", 83 pr_err("no space for dev %s\n", type);
83 type);
84 return NULL; 84 return NULL;
85 } 85 }
86 mutex_lock(&atm_dev_mutex); 86 mutex_lock(&atm_dev_mutex);
87 if (number != -1) { 87 if (number != -1) {
88 if ((inuse = __atm_dev_lookup(number))) { 88 inuse = __atm_dev_lookup(number);
89 if (inuse) {
89 atm_dev_put(inuse); 90 atm_dev_put(inuse);
90 mutex_unlock(&atm_dev_mutex); 91 mutex_unlock(&atm_dev_mutex);
91 kfree(dev); 92 kfree(dev);
@@ -109,16 +110,12 @@ struct atm_dev *atm_dev_register(const char *type, const struct atmdev_ops *ops,
109 atomic_set(&dev->refcnt, 1); 110 atomic_set(&dev->refcnt, 1);
110 111
111 if (atm_proc_dev_register(dev) < 0) { 112 if (atm_proc_dev_register(dev) < 0) {
112 printk(KERN_ERR "atm_dev_register: " 113 pr_err("atm_proc_dev_register failed for dev %s\n", type);
113 "atm_proc_dev_register failed for dev %s\n",
114 type);
115 goto out_fail; 114 goto out_fail;
116 } 115 }
117 116
118 if (atm_register_sysfs(dev) < 0) { 117 if (atm_register_sysfs(dev) < 0) {
119 printk(KERN_ERR "atm_dev_register: " 118 pr_err("atm_register_sysfs failed for dev %s\n", type);
120 "atm_register_sysfs failed for dev %s\n",
121 type);
122 atm_proc_dev_deregister(dev); 119 atm_proc_dev_deregister(dev);
123 goto out_fail; 120 goto out_fail;
124 } 121 }
@@ -134,7 +131,7 @@ out_fail:
134 dev = NULL; 131 dev = NULL;
135 goto out; 132 goto out;
136} 133}
137 134EXPORT_SYMBOL(atm_dev_register);
138 135
139void atm_dev_deregister(struct atm_dev *dev) 136void atm_dev_deregister(struct atm_dev *dev)
140{ 137{
@@ -156,7 +153,7 @@ void atm_dev_deregister(struct atm_dev *dev)
156 153
157 atm_dev_put(dev); 154 atm_dev_put(dev);
158} 155}
159 156EXPORT_SYMBOL(atm_dev_deregister);
160 157
161static void copy_aal_stats(struct k_atm_aal_stats *from, 158static void copy_aal_stats(struct k_atm_aal_stats *from,
162 struct atm_aal_stats *to) 159 struct atm_aal_stats *to)
@@ -166,7 +163,6 @@ static void copy_aal_stats(struct k_atm_aal_stats *from,
166#undef __HANDLE_ITEM 163#undef __HANDLE_ITEM
167} 164}
168 165
169
170static void subtract_aal_stats(struct k_atm_aal_stats *from, 166static void subtract_aal_stats(struct k_atm_aal_stats *from,
171 struct atm_aal_stats *to) 167 struct atm_aal_stats *to)
172{ 168{
@@ -175,8 +171,8 @@ static void subtract_aal_stats(struct k_atm_aal_stats *from,
175#undef __HANDLE_ITEM 171#undef __HANDLE_ITEM
176} 172}
177 173
178 174static int fetch_stats(struct atm_dev *dev, struct atm_dev_stats __user *arg,
179static int fetch_stats(struct atm_dev *dev, struct atm_dev_stats __user *arg, int zero) 175 int zero)
180{ 176{
181 struct atm_dev_stats tmp; 177 struct atm_dev_stats tmp;
182 int error = 0; 178 int error = 0;
@@ -194,7 +190,6 @@ static int fetch_stats(struct atm_dev *dev, struct atm_dev_stats __user *arg, in
194 return error ? -EFAULT : 0; 190 return error ? -EFAULT : 0;
195} 191}
196 192
197
198int atm_dev_ioctl(unsigned int cmd, void __user *arg, int compat) 193int atm_dev_ioctl(unsigned int cmd, void __user *arg, int compat)
199{ 194{
200 void __user *buf; 195 void __user *buf;
@@ -210,50 +205,49 @@ int atm_dev_ioctl(unsigned int cmd, void __user *arg, int compat)
210#endif 205#endif
211 206
212 switch (cmd) { 207 switch (cmd) {
213 case ATM_GETNAMES: 208 case ATM_GETNAMES:
214 209 if (compat) {
215 if (compat) {
216#ifdef CONFIG_COMPAT 210#ifdef CONFIG_COMPAT
217 struct compat_atm_iobuf __user *ciobuf = arg; 211 struct compat_atm_iobuf __user *ciobuf = arg;
218 compat_uptr_t cbuf; 212 compat_uptr_t cbuf;
219 iobuf_len = &ciobuf->length; 213 iobuf_len = &ciobuf->length;
220 if (get_user(cbuf, &ciobuf->buffer)) 214 if (get_user(cbuf, &ciobuf->buffer))
221 return -EFAULT; 215 return -EFAULT;
222 buf = compat_ptr(cbuf); 216 buf = compat_ptr(cbuf);
223#endif 217#endif
224 } else { 218 } else {
225 struct atm_iobuf __user *iobuf = arg; 219 struct atm_iobuf __user *iobuf = arg;
226 iobuf_len = &iobuf->length; 220 iobuf_len = &iobuf->length;
227 if (get_user(buf, &iobuf->buffer)) 221 if (get_user(buf, &iobuf->buffer))
228 return -EFAULT;
229 }
230 if (get_user(len, iobuf_len))
231 return -EFAULT; 222 return -EFAULT;
232 mutex_lock(&atm_dev_mutex); 223 }
233 list_for_each(p, &atm_devs) 224 if (get_user(len, iobuf_len))
234 size += sizeof(int); 225 return -EFAULT;
235 if (size > len) { 226 mutex_lock(&atm_dev_mutex);
236 mutex_unlock(&atm_dev_mutex); 227 list_for_each(p, &atm_devs)
237 return -E2BIG; 228 size += sizeof(int);
238 } 229 if (size > len) {
239 tmp_buf = kmalloc(size, GFP_ATOMIC);
240 if (!tmp_buf) {
241 mutex_unlock(&atm_dev_mutex);
242 return -ENOMEM;
243 }
244 tmp_p = tmp_buf;
245 list_for_each(p, &atm_devs) {
246 dev = list_entry(p, struct atm_dev, dev_list);
247 *tmp_p++ = dev->number;
248 }
249 mutex_unlock(&atm_dev_mutex); 230 mutex_unlock(&atm_dev_mutex);
250 error = ((copy_to_user(buf, tmp_buf, size)) || 231 return -E2BIG;
251 put_user(size, iobuf_len)) 232 }
252 ? -EFAULT : 0; 233 tmp_buf = kmalloc(size, GFP_ATOMIC);
253 kfree(tmp_buf); 234 if (!tmp_buf) {
254 return error; 235 mutex_unlock(&atm_dev_mutex);
255 default: 236 return -ENOMEM;
256 break; 237 }
238 tmp_p = tmp_buf;
239 list_for_each(p, &atm_devs) {
240 dev = list_entry(p, struct atm_dev, dev_list);
241 *tmp_p++ = dev->number;
242 }
243 mutex_unlock(&atm_dev_mutex);
244 error = ((copy_to_user(buf, tmp_buf, size)) ||
245 put_user(size, iobuf_len))
246 ? -EFAULT : 0;
247 kfree(tmp_buf);
248 return error;
249 default:
250 break;
257 } 251 }
258 252
259 if (compat) { 253 if (compat) {
@@ -282,166 +276,167 @@ int atm_dev_ioctl(unsigned int cmd, void __user *arg, int compat)
282 if (get_user(number, &sioc->number)) 276 if (get_user(number, &sioc->number))
283 return -EFAULT; 277 return -EFAULT;
284 } 278 }
285 if (!(dev = try_then_request_module(atm_dev_lookup(number), 279
286 "atm-device-%d", number))) 280 dev = try_then_request_module(atm_dev_lookup(number), "atm-device-%d",
281 number);
282 if (!dev)
287 return -ENODEV; 283 return -ENODEV;
288 284
289 switch (cmd) { 285 switch (cmd) {
290 case ATM_GETTYPE: 286 case ATM_GETTYPE:
291 size = strlen(dev->type) + 1; 287 size = strlen(dev->type) + 1;
292 if (copy_to_user(buf, dev->type, size)) { 288 if (copy_to_user(buf, dev->type, size)) {
293 error = -EFAULT; 289 error = -EFAULT;
294 goto done; 290 goto done;
295 } 291 }
296 break; 292 break;
297 case ATM_GETESI: 293 case ATM_GETESI:
298 size = ESI_LEN; 294 size = ESI_LEN;
299 if (copy_to_user(buf, dev->esi, size)) { 295 if (copy_to_user(buf, dev->esi, size)) {
300 error = -EFAULT; 296 error = -EFAULT;
301 goto done; 297 goto done;
302 } 298 }
303 break; 299 break;
304 case ATM_SETESI: 300 case ATM_SETESI:
305 { 301 {
306 int i; 302 int i;
307 303
308 for (i = 0; i < ESI_LEN; i++) 304 for (i = 0; i < ESI_LEN; i++)
309 if (dev->esi[i]) { 305 if (dev->esi[i]) {
310 error = -EEXIST; 306 error = -EEXIST;
311 goto done;
312 }
313 }
314 /* fall through */
315 case ATM_SETESIF:
316 {
317 unsigned char esi[ESI_LEN];
318
319 if (!capable(CAP_NET_ADMIN)) {
320 error = -EPERM;
321 goto done;
322 }
323 if (copy_from_user(esi, buf, ESI_LEN)) {
324 error = -EFAULT;
325 goto done;
326 }
327 memcpy(dev->esi, esi, ESI_LEN);
328 error = ESI_LEN;
329 goto done;
330 }
331 case ATM_GETSTATZ:
332 if (!capable(CAP_NET_ADMIN)) {
333 error = -EPERM;
334 goto done;
335 }
336 /* fall through */
337 case ATM_GETSTAT:
338 size = sizeof(struct atm_dev_stats);
339 error = fetch_stats(dev, buf, cmd == ATM_GETSTATZ);
340 if (error)
341 goto done;
342 break;
343 case ATM_GETCIRANGE:
344 size = sizeof(struct atm_cirange);
345 if (copy_to_user(buf, &dev->ci_range, size)) {
346 error = -EFAULT;
347 goto done;
348 }
349 break;
350 case ATM_GETLINKRATE:
351 size = sizeof(int);
352 if (copy_to_user(buf, &dev->link_rate, size)) {
353 error = -EFAULT;
354 goto done;
355 }
356 break;
357 case ATM_RSTADDR:
358 if (!capable(CAP_NET_ADMIN)) {
359 error = -EPERM;
360 goto done;
361 }
362 atm_reset_addr(dev, ATM_ADDR_LOCAL);
363 break;
364 case ATM_ADDADDR:
365 case ATM_DELADDR:
366 case ATM_ADDLECSADDR:
367 case ATM_DELLECSADDR:
368 if (!capable(CAP_NET_ADMIN)) {
369 error = -EPERM;
370 goto done;
371 }
372 {
373 struct sockaddr_atmsvc addr;
374
375 if (copy_from_user(&addr, buf, sizeof(addr))) {
376 error = -EFAULT;
377 goto done;
378 }
379 if (cmd == ATM_ADDADDR || cmd == ATM_ADDLECSADDR)
380 error = atm_add_addr(dev, &addr,
381 (cmd == ATM_ADDADDR ?
382 ATM_ADDR_LOCAL : ATM_ADDR_LECS));
383 else
384 error = atm_del_addr(dev, &addr,
385 (cmd == ATM_DELADDR ?
386 ATM_ADDR_LOCAL : ATM_ADDR_LECS));
387 goto done; 307 goto done;
388 } 308 }
389 case ATM_GETADDR: 309 }
390 case ATM_GETLECSADDR: 310 /* fall through */
391 error = atm_get_addr(dev, buf, len, 311 case ATM_SETESIF:
392 (cmd == ATM_GETADDR ? 312 {
313 unsigned char esi[ESI_LEN];
314
315 if (!capable(CAP_NET_ADMIN)) {
316 error = -EPERM;
317 goto done;
318 }
319 if (copy_from_user(esi, buf, ESI_LEN)) {
320 error = -EFAULT;
321 goto done;
322 }
323 memcpy(dev->esi, esi, ESI_LEN);
324 error = ESI_LEN;
325 goto done;
326 }
327 case ATM_GETSTATZ:
328 if (!capable(CAP_NET_ADMIN)) {
329 error = -EPERM;
330 goto done;
331 }
332 /* fall through */
333 case ATM_GETSTAT:
334 size = sizeof(struct atm_dev_stats);
335 error = fetch_stats(dev, buf, cmd == ATM_GETSTATZ);
336 if (error)
337 goto done;
338 break;
339 case ATM_GETCIRANGE:
340 size = sizeof(struct atm_cirange);
341 if (copy_to_user(buf, &dev->ci_range, size)) {
342 error = -EFAULT;
343 goto done;
344 }
345 break;
346 case ATM_GETLINKRATE:
347 size = sizeof(int);
348 if (copy_to_user(buf, &dev->link_rate, size)) {
349 error = -EFAULT;
350 goto done;
351 }
352 break;
353 case ATM_RSTADDR:
354 if (!capable(CAP_NET_ADMIN)) {
355 error = -EPERM;
356 goto done;
357 }
358 atm_reset_addr(dev, ATM_ADDR_LOCAL);
359 break;
360 case ATM_ADDADDR:
361 case ATM_DELADDR:
362 case ATM_ADDLECSADDR:
363 case ATM_DELLECSADDR:
364 {
365 struct sockaddr_atmsvc addr;
366
367 if (!capable(CAP_NET_ADMIN)) {
368 error = -EPERM;
369 goto done;
370 }
371
372 if (copy_from_user(&addr, buf, sizeof(addr))) {
373 error = -EFAULT;
374 goto done;
375 }
376 if (cmd == ATM_ADDADDR || cmd == ATM_ADDLECSADDR)
377 error = atm_add_addr(dev, &addr,
378 (cmd == ATM_ADDADDR ?
393 ATM_ADDR_LOCAL : ATM_ADDR_LECS)); 379 ATM_ADDR_LOCAL : ATM_ADDR_LECS));
394 if (error < 0) 380 else
395 goto done; 381 error = atm_del_addr(dev, &addr,
396 size = error; 382 (cmd == ATM_DELADDR ?
397 /* may return 0, but later on size == 0 means "don't 383 ATM_ADDR_LOCAL : ATM_ADDR_LECS));
398 write the length" */ 384 goto done;
399 error = put_user(size, sioc_len) 385 }
400 ? -EFAULT : 0; 386 case ATM_GETADDR:
387 case ATM_GETLECSADDR:
388 error = atm_get_addr(dev, buf, len,
389 (cmd == ATM_GETADDR ?
390 ATM_ADDR_LOCAL : ATM_ADDR_LECS));
391 if (error < 0)
392 goto done;
393 size = error;
394 /* may return 0, but later on size == 0 means "don't
395 write the length" */
396 error = put_user(size, sioc_len) ? -EFAULT : 0;
397 goto done;
398 case ATM_SETLOOP:
399 if (__ATM_LM_XTRMT((int) (unsigned long) buf) &&
400 __ATM_LM_XTLOC((int) (unsigned long) buf) >
401 __ATM_LM_XTRMT((int) (unsigned long) buf)) {
402 error = -EINVAL;
401 goto done; 403 goto done;
402 case ATM_SETLOOP: 404 }
403 if (__ATM_LM_XTRMT((int) (unsigned long) buf) && 405 /* fall through */
404 __ATM_LM_XTLOC((int) (unsigned long) buf) > 406 case ATM_SETCIRANGE:
405 __ATM_LM_XTRMT((int) (unsigned long) buf)) { 407 case SONET_GETSTATZ:
408 case SONET_SETDIAG:
409 case SONET_CLRDIAG:
410 case SONET_SETFRAMING:
411 if (!capable(CAP_NET_ADMIN)) {
412 error = -EPERM;
413 goto done;
414 }
415 /* fall through */
416 default:
417 if (compat) {
418#ifdef CONFIG_COMPAT
419 if (!dev->ops->compat_ioctl) {
406 error = -EINVAL; 420 error = -EINVAL;
407 goto done; 421 goto done;
408 } 422 }
409 /* fall through */ 423 size = dev->ops->compat_ioctl(dev, cmd, buf);
410 case ATM_SETCIRANGE:
411 case SONET_GETSTATZ:
412 case SONET_SETDIAG:
413 case SONET_CLRDIAG:
414 case SONET_SETFRAMING:
415 if (!capable(CAP_NET_ADMIN)) {
416 error = -EPERM;
417 goto done;
418 }
419 /* fall through */
420 default:
421 if (compat) {
422#ifdef CONFIG_COMPAT
423 if (!dev->ops->compat_ioctl) {
424 error = -EINVAL;
425 goto done;
426 }
427 size = dev->ops->compat_ioctl(dev, cmd, buf);
428#endif 424#endif
429 } else { 425 } else {
430 if (!dev->ops->ioctl) { 426 if (!dev->ops->ioctl) {
431 error = -EINVAL; 427 error = -EINVAL;
432 goto done;
433 }
434 size = dev->ops->ioctl(dev, cmd, buf);
435 }
436 if (size < 0) {
437 error = (size == -ENOIOCTLCMD ? -EINVAL : size);
438 goto done; 428 goto done;
439 } 429 }
430 size = dev->ops->ioctl(dev, cmd, buf);
431 }
432 if (size < 0) {
433 error = (size == -ENOIOCTLCMD ? -EINVAL : size);
434 goto done;
435 }
440 } 436 }
441 437
442 if (size) 438 if (size)
443 error = put_user(size, sioc_len) 439 error = put_user(size, sioc_len) ? -EFAULT : 0;
444 ? -EFAULT : 0;
445 else 440 else
446 error = 0; 441 error = 0;
447done: 442done:
@@ -449,7 +444,7 @@ done:
449 return error; 444 return error;
450} 445}
451 446
452static __inline__ void *dev_get_idx(loff_t left) 447static inline void *dev_get_idx(loff_t left)
453{ 448{
454 struct list_head *p; 449 struct list_head *p;
455 450
@@ -478,8 +473,3 @@ void *atm_dev_seq_next(struct seq_file *seq, void *v, loff_t *pos)
478 ? atm_devs.next : ((struct list_head *)v)->next; 473 ? atm_devs.next : ((struct list_head *)v)->next;
479 return (v == &atm_devs) ? NULL : v; 474 return (v == &atm_devs) ? NULL : v;
480} 475}
481
482
483EXPORT_SYMBOL(atm_dev_register);
484EXPORT_SYMBOL(atm_dev_deregister);
485EXPORT_SYMBOL(atm_dev_lookup);
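The printk-to-pr_err conversions in the resources.c hunks above work because of the pr_fmt() define added at the top of the file: every pr_*() message is automatically prefixed with KBUILD_MODNAME and __func__, so the hand-written "atm_dev_register: " prefixes can simply be dropped from the format strings. A small sketch of the mechanism (example_probe() and the "foo" argument are illustrative only):

/* must appear before the first include so it overrides the default pr_fmt */
#define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/module.h>

static int example_probe(void)
{
	/* logs something like "atm:example_probe: no space for dev foo" */
	pr_err("no space for dev %s\n", "foo");
	return -ENOMEM;
}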
diff --git a/net/atm/signaling.c b/net/atm/signaling.c
index 22992140052..ad1d28ae512 100644
--- a/net/atm/signaling.c
+++ b/net/atm/signaling.c
@@ -2,6 +2,7 @@
2 2
3/* Written 1995-2000 by Werner Almesberger, EPFL LRC/ICA */ 3/* Written 1995-2000 by Werner Almesberger, EPFL LRC/ICA */
4 4
5#define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__
5 6
6#include <linux/errno.h> /* error codes */ 7#include <linux/errno.h> /* error codes */
7#include <linux/kernel.h> /* printk */ 8#include <linux/kernel.h> /* printk */
@@ -17,7 +18,6 @@
17#include "resources.h" 18#include "resources.h"
18#include "signaling.h" 19#include "signaling.h"
19 20
20
21#undef WAIT_FOR_DEMON /* #define this if system calls on SVC sockets 21#undef WAIT_FOR_DEMON /* #define this if system calls on SVC sockets
22 should block until the demon runs. 22 should block until the demon runs.
23 Danger: may cause nasty hangs if the demon 23 Danger: may cause nasty hangs if the demon
@@ -28,60 +28,59 @@ struct atm_vcc *sigd = NULL;
28static DECLARE_WAIT_QUEUE_HEAD(sigd_sleep); 28static DECLARE_WAIT_QUEUE_HEAD(sigd_sleep);
29#endif 29#endif
30 30
31
32static void sigd_put_skb(struct sk_buff *skb) 31static void sigd_put_skb(struct sk_buff *skb)
33{ 32{
34#ifdef WAIT_FOR_DEMON 33#ifdef WAIT_FOR_DEMON
35 DECLARE_WAITQUEUE(wait,current); 34 DECLARE_WAITQUEUE(wait, current);
36 35
37 add_wait_queue(&sigd_sleep,&wait); 36 add_wait_queue(&sigd_sleep, &wait);
38 while (!sigd) { 37 while (!sigd) {
39 set_current_state(TASK_UNINTERRUPTIBLE); 38 set_current_state(TASK_UNINTERRUPTIBLE);
40 pr_debug("atmsvc: waiting for signaling demon...\n"); 39 pr_debug("atmsvc: waiting for signaling daemon...\n");
41 schedule(); 40 schedule();
42 } 41 }
43 current->state = TASK_RUNNING; 42 current->state = TASK_RUNNING;
44 remove_wait_queue(&sigd_sleep,&wait); 43 remove_wait_queue(&sigd_sleep, &wait);
45#else 44#else
46 if (!sigd) { 45 if (!sigd) {
47 pr_debug("atmsvc: no signaling demon\n"); 46 pr_debug("atmsvc: no signaling daemon\n");
48 kfree_skb(skb); 47 kfree_skb(skb);
49 return; 48 return;
50 } 49 }
51#endif 50#endif
52 atm_force_charge(sigd,skb->truesize); 51 atm_force_charge(sigd, skb->truesize);
53 skb_queue_tail(&sk_atm(sigd)->sk_receive_queue,skb); 52 skb_queue_tail(&sk_atm(sigd)->sk_receive_queue, skb);
54 sk_atm(sigd)->sk_data_ready(sk_atm(sigd), skb->len); 53 sk_atm(sigd)->sk_data_ready(sk_atm(sigd), skb->len);
55} 54}
56 55
57 56static void modify_qos(struct atm_vcc *vcc, struct atmsvc_msg *msg)
58static void modify_qos(struct atm_vcc *vcc,struct atmsvc_msg *msg)
59{ 57{
60 struct sk_buff *skb; 58 struct sk_buff *skb;
61 59
62 if (test_bit(ATM_VF_RELEASED,&vcc->flags) || 60 if (test_bit(ATM_VF_RELEASED, &vcc->flags) ||
63 !test_bit(ATM_VF_READY,&vcc->flags)) 61 !test_bit(ATM_VF_READY, &vcc->flags))
64 return; 62 return;
65 msg->type = as_error; 63 msg->type = as_error;
66 if (!vcc->dev->ops->change_qos) msg->reply = -EOPNOTSUPP; 64 if (!vcc->dev->ops->change_qos)
65 msg->reply = -EOPNOTSUPP;
67 else { 66 else {
68 /* should lock VCC */ 67 /* should lock VCC */
69 msg->reply = vcc->dev->ops->change_qos(vcc,&msg->qos, 68 msg->reply = vcc->dev->ops->change_qos(vcc, &msg->qos,
70 msg->reply); 69 msg->reply);
71 if (!msg->reply) msg->type = as_okay; 70 if (!msg->reply)
71 msg->type = as_okay;
72 } 72 }
73 /* 73 /*
74 * Should probably just turn around the old skb. But the, the buffer 74 * Should probably just turn around the old skb. But the, the buffer
75 * space accounting needs to follow the change too. Maybe later. 75 * space accounting needs to follow the change too. Maybe later.
76 */ 76 */
77 while (!(skb = alloc_skb(sizeof(struct atmsvc_msg),GFP_KERNEL))) 77 while (!(skb = alloc_skb(sizeof(struct atmsvc_msg), GFP_KERNEL)))
78 schedule(); 78 schedule();
79 *(struct atmsvc_msg *) skb_put(skb,sizeof(struct atmsvc_msg)) = *msg; 79 *(struct atmsvc_msg *)skb_put(skb, sizeof(struct atmsvc_msg)) = *msg;
80 sigd_put_skb(skb); 80 sigd_put_skb(skb);
81} 81}
82 82
83 83static int sigd_send(struct atm_vcc *vcc, struct sk_buff *skb)
84static int sigd_send(struct atm_vcc *vcc,struct sk_buff *skb)
85{ 84{
86 struct atmsvc_msg *msg; 85 struct atmsvc_msg *msg;
87 struct atm_vcc *session_vcc; 86 struct atm_vcc *session_vcc;
@@ -90,69 +89,68 @@ static int sigd_send(struct atm_vcc *vcc,struct sk_buff *skb)
90 msg = (struct atmsvc_msg *) skb->data; 89 msg = (struct atmsvc_msg *) skb->data;
91 atomic_sub(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc); 90 atomic_sub(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc);
92 vcc = *(struct atm_vcc **) &msg->vcc; 91 vcc = *(struct atm_vcc **) &msg->vcc;
93 pr_debug("sigd_send %d (0x%lx)\n",(int) msg->type, 92 pr_debug("%d (0x%lx)\n", (int)msg->type, (unsigned long)vcc);
94 (unsigned long) vcc);
95 sk = sk_atm(vcc); 93 sk = sk_atm(vcc);
96 94
97 switch (msg->type) { 95 switch (msg->type) {
98 case as_okay: 96 case as_okay:
99 sk->sk_err = -msg->reply; 97 sk->sk_err = -msg->reply;
100 clear_bit(ATM_VF_WAITING, &vcc->flags); 98 clear_bit(ATM_VF_WAITING, &vcc->flags);
101 if (!*vcc->local.sas_addr.prv && 99 if (!*vcc->local.sas_addr.prv && !*vcc->local.sas_addr.pub) {
102 !*vcc->local.sas_addr.pub) { 100 vcc->local.sas_family = AF_ATMSVC;
103 vcc->local.sas_family = AF_ATMSVC; 101 memcpy(vcc->local.sas_addr.prv,
104 memcpy(vcc->local.sas_addr.prv, 102 msg->local.sas_addr.prv, ATM_ESA_LEN);
105 msg->local.sas_addr.prv,ATM_ESA_LEN); 103 memcpy(vcc->local.sas_addr.pub,
106 memcpy(vcc->local.sas_addr.pub, 104 msg->local.sas_addr.pub, ATM_E164_LEN + 1);
107 msg->local.sas_addr.pub,ATM_E164_LEN+1); 105 }
108 } 106 session_vcc = vcc->session ? vcc->session : vcc;
109 session_vcc = vcc->session ? vcc->session : vcc; 107 if (session_vcc->vpi || session_vcc->vci)
110 if (session_vcc->vpi || session_vcc->vci) break;
111 session_vcc->itf = msg->pvc.sap_addr.itf;
112 session_vcc->vpi = msg->pvc.sap_addr.vpi;
113 session_vcc->vci = msg->pvc.sap_addr.vci;
114 if (session_vcc->vpi || session_vcc->vci)
115 session_vcc->qos = msg->qos;
116 break;
117 case as_error:
118 clear_bit(ATM_VF_REGIS,&vcc->flags);
119 clear_bit(ATM_VF_READY,&vcc->flags);
120 sk->sk_err = -msg->reply;
121 clear_bit(ATM_VF_WAITING, &vcc->flags);
122 break; 108 break;
123 case as_indicate: 109 session_vcc->itf = msg->pvc.sap_addr.itf;
124 vcc = *(struct atm_vcc **) &msg->listen_vcc; 110 session_vcc->vpi = msg->pvc.sap_addr.vpi;
125 sk = sk_atm(vcc); 111 session_vcc->vci = msg->pvc.sap_addr.vci;
126 pr_debug("as_indicate!!!\n"); 112 if (session_vcc->vpi || session_vcc->vci)
127 lock_sock(sk); 113 session_vcc->qos = msg->qos;
128 if (sk_acceptq_is_full(sk)) { 114 break;
129 sigd_enq(NULL,as_reject,vcc,NULL,NULL); 115 case as_error:
130 dev_kfree_skb(skb); 116 clear_bit(ATM_VF_REGIS, &vcc->flags);
131 goto as_indicate_complete; 117 clear_bit(ATM_VF_READY, &vcc->flags);
132 } 118 sk->sk_err = -msg->reply;
133 sk->sk_ack_backlog++; 119 clear_bit(ATM_VF_WAITING, &vcc->flags);
134 skb_queue_tail(&sk->sk_receive_queue, skb); 120 break;
135 pr_debug("waking sk->sk_sleep 0x%p\n", sk->sk_sleep); 121 case as_indicate:
136 sk->sk_state_change(sk); 122 vcc = *(struct atm_vcc **)&msg->listen_vcc;
123 sk = sk_atm(vcc);
124 pr_debug("as_indicate!!!\n");
125 lock_sock(sk);
126 if (sk_acceptq_is_full(sk)) {
127 sigd_enq(NULL, as_reject, vcc, NULL, NULL);
128 dev_kfree_skb(skb);
129 goto as_indicate_complete;
130 }
131 sk->sk_ack_backlog++;
132 skb_queue_tail(&sk->sk_receive_queue, skb);
133 pr_debug("waking sk->sk_sleep 0x%p\n", sk->sk_sleep);
134 sk->sk_state_change(sk);
137as_indicate_complete: 135as_indicate_complete:
138 release_sock(sk); 136 release_sock(sk);
139 return 0; 137 return 0;
140 case as_close: 138 case as_close:
141 set_bit(ATM_VF_RELEASED,&vcc->flags); 139 set_bit(ATM_VF_RELEASED, &vcc->flags);
142 vcc_release_async(vcc, msg->reply); 140 vcc_release_async(vcc, msg->reply);
143 goto out; 141 goto out;
144 case as_modify: 142 case as_modify:
145 modify_qos(vcc,msg); 143 modify_qos(vcc, msg);
146 break; 144 break;
147 case as_addparty: 145 case as_addparty:
148 case as_dropparty: 146 case as_dropparty:
149 sk->sk_err_soft = msg->reply; /* < 0 failure, otherwise ep_ref */ 147 sk->sk_err_soft = msg->reply;
150 clear_bit(ATM_VF_WAITING, &vcc->flags); 148 /* < 0 failure, otherwise ep_ref */
151 break; 149 clear_bit(ATM_VF_WAITING, &vcc->flags);
152 default: 150 break;
153 printk(KERN_ALERT "sigd_send: bad message type %d\n", 151 default:
154 (int) msg->type); 152 pr_alert("bad message type %d\n", (int)msg->type);
155 return -EINVAL; 153 return -EINVAL;
156 } 154 }
157 sk->sk_state_change(sk); 155 sk->sk_state_change(sk);
158out: 156out:
@@ -160,48 +158,52 @@ out:
160 return 0; 158 return 0;
161} 159}
162 160
163 161void sigd_enq2(struct atm_vcc *vcc, enum atmsvc_msg_type type,
164void sigd_enq2(struct atm_vcc *vcc,enum atmsvc_msg_type type, 162 struct atm_vcc *listen_vcc, const struct sockaddr_atmpvc *pvc,
165 struct atm_vcc *listen_vcc,const struct sockaddr_atmpvc *pvc, 163 const struct sockaddr_atmsvc *svc, const struct atm_qos *qos,
166 const struct sockaddr_atmsvc *svc,const struct atm_qos *qos,int reply) 164 int reply)
167{ 165{
168 struct sk_buff *skb; 166 struct sk_buff *skb;
169 struct atmsvc_msg *msg; 167 struct atmsvc_msg *msg;
170 static unsigned session = 0; 168 static unsigned session = 0;
171 169
172 pr_debug("sigd_enq %d (0x%p)\n",(int) type,vcc); 170 pr_debug("%d (0x%p)\n", (int)type, vcc);
173 while (!(skb = alloc_skb(sizeof(struct atmsvc_msg),GFP_KERNEL))) 171 while (!(skb = alloc_skb(sizeof(struct atmsvc_msg), GFP_KERNEL)))
174 schedule(); 172 schedule();
175 msg = (struct atmsvc_msg *) skb_put(skb,sizeof(struct atmsvc_msg)); 173 msg = (struct atmsvc_msg *)skb_put(skb, sizeof(struct atmsvc_msg));
176 memset(msg,0,sizeof(*msg)); 174 memset(msg, 0, sizeof(*msg));
177 msg->type = type; 175 msg->type = type;
178 *(struct atm_vcc **) &msg->vcc = vcc; 176 *(struct atm_vcc **) &msg->vcc = vcc;
179 *(struct atm_vcc **) &msg->listen_vcc = listen_vcc; 177 *(struct atm_vcc **) &msg->listen_vcc = listen_vcc;
180 msg->reply = reply; 178 msg->reply = reply;
181 if (qos) msg->qos = *qos; 179 if (qos)
182 if (vcc) msg->sap = vcc->sap; 180 msg->qos = *qos;
183 if (svc) msg->svc = *svc; 181 if (vcc)
184 if (vcc) msg->local = vcc->local; 182 msg->sap = vcc->sap;
185 if (pvc) msg->pvc = *pvc; 183 if (svc)
184 msg->svc = *svc;
185 if (vcc)
186 msg->local = vcc->local;
187 if (pvc)
188 msg->pvc = *pvc;
186 if (vcc) { 189 if (vcc) {
187 if (type == as_connect && test_bit(ATM_VF_SESSION, &vcc->flags)) 190 if (type == as_connect && test_bit(ATM_VF_SESSION, &vcc->flags))
188 msg->session = ++session; 191 msg->session = ++session;
189 /* every new pmp connect gets the next session number */ 192 /* every new pmp connect gets the next session number */
190 } 193 }
191 sigd_put_skb(skb); 194 sigd_put_skb(skb);
192 if (vcc) set_bit(ATM_VF_REGIS,&vcc->flags); 195 if (vcc)
196 set_bit(ATM_VF_REGIS, &vcc->flags);
193} 197}
194 198
195 199void sigd_enq(struct atm_vcc *vcc, enum atmsvc_msg_type type,
196void sigd_enq(struct atm_vcc *vcc,enum atmsvc_msg_type type, 200 struct atm_vcc *listen_vcc, const struct sockaddr_atmpvc *pvc,
197 struct atm_vcc *listen_vcc,const struct sockaddr_atmpvc *pvc, 201 const struct sockaddr_atmsvc *svc)
198 const struct sockaddr_atmsvc *svc)
199{ 202{
200 sigd_enq2(vcc,type,listen_vcc,pvc,svc,vcc ? &vcc->qos : NULL,0); 203 sigd_enq2(vcc, type, listen_vcc, pvc, svc, vcc ? &vcc->qos : NULL, 0);
201 /* other ISP applications may use "reply" */ 204 /* other ISP applications may use "reply" */
202} 205}
203 206
204
205static void purge_vcc(struct atm_vcc *vcc) 207static void purge_vcc(struct atm_vcc *vcc)
206{ 208{
207 if (sk_atm(vcc)->sk_family == PF_ATMSVC && 209 if (sk_atm(vcc)->sk_family == PF_ATMSVC &&
@@ -212,21 +214,20 @@ static void purge_vcc(struct atm_vcc *vcc)
212 } 214 }
213} 215}
214 216
215
216static void sigd_close(struct atm_vcc *vcc) 217static void sigd_close(struct atm_vcc *vcc)
217{ 218{
218 struct hlist_node *node; 219 struct hlist_node *node;
219 struct sock *s; 220 struct sock *s;
220 int i; 221 int i;
221 222
222 pr_debug("sigd_close\n"); 223 pr_debug("\n");
223 sigd = NULL; 224 sigd = NULL;
224 if (skb_peek(&sk_atm(vcc)->sk_receive_queue)) 225 if (skb_peek(&sk_atm(vcc)->sk_receive_queue))
225 printk(KERN_ERR "sigd_close: closing with requests pending\n"); 226 pr_err("closing with requests pending\n");
226 skb_queue_purge(&sk_atm(vcc)->sk_receive_queue); 227 skb_queue_purge(&sk_atm(vcc)->sk_receive_queue);
227 228
228 read_lock(&vcc_sklist_lock); 229 read_lock(&vcc_sklist_lock);
229 for(i = 0; i < VCC_HTABLE_SIZE; ++i) { 230 for (i = 0; i < VCC_HTABLE_SIZE; ++i) {
230 struct hlist_head *head = &vcc_hash[i]; 231 struct hlist_head *head = &vcc_hash[i];
231 232
232 sk_for_each(s, node, head) { 233 sk_for_each(s, node, head) {
@@ -238,13 +239,11 @@ static void sigd_close(struct atm_vcc *vcc)
238 read_unlock(&vcc_sklist_lock); 239 read_unlock(&vcc_sklist_lock);
239} 240}
240 241
241
242static struct atmdev_ops sigd_dev_ops = { 242static struct atmdev_ops sigd_dev_ops = {
243 .close = sigd_close, 243 .close = sigd_close,
244 .send = sigd_send 244 .send = sigd_send
245}; 245};
246 246
247
248static struct atm_dev sigd_dev = { 247static struct atm_dev sigd_dev = {
249 .ops = &sigd_dev_ops, 248 .ops = &sigd_dev_ops,
250 .type = "sig", 249 .type = "sig",
@@ -252,16 +251,16 @@ static struct atm_dev sigd_dev = {
252 .lock = __SPIN_LOCK_UNLOCKED(sigd_dev.lock) 251 .lock = __SPIN_LOCK_UNLOCKED(sigd_dev.lock)
253}; 252};
254 253
255
256int sigd_attach(struct atm_vcc *vcc) 254int sigd_attach(struct atm_vcc *vcc)
257{ 255{
258 if (sigd) return -EADDRINUSE; 256 if (sigd)
259 pr_debug("sigd_attach\n"); 257 return -EADDRINUSE;
258 pr_debug("\n");
260 sigd = vcc; 259 sigd = vcc;
261 vcc->dev = &sigd_dev; 260 vcc->dev = &sigd_dev;
262 vcc_insert_socket(sk_atm(vcc)); 261 vcc_insert_socket(sk_atm(vcc));
263 set_bit(ATM_VF_META,&vcc->flags); 262 set_bit(ATM_VF_META, &vcc->flags);
264 set_bit(ATM_VF_READY,&vcc->flags); 263 set_bit(ATM_VF_READY, &vcc->flags);
265#ifdef WAIT_FOR_DEMON 264#ifdef WAIT_FOR_DEMON
266 wake_up(&sigd_sleep); 265 wake_up(&sigd_sleep);
267#endif 266#endif
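Much of the signaling.c churn above is the same mechanical cleanup: with pr_fmt() now supplying the function name, the hard-coded "sigd_send"/"sigd_close" prefixes drop out of the pr_debug() strings, and single-statement if bodies move onto their own line per CodingStyle. A tiny illustrative helper mirroring the sigd_enq2() style fix (the example_ name is invented; the structures come from the ATM headers):

#include <linux/atmdev.h>
#include <linux/atmsvc.h>

static void example_fill_qos(struct atmsvc_msg *msg, const struct atm_qos *qos)
{
	/* before the cleanup this read: if (qos) msg->qos = *qos; */
	if (qos)
		msg->qos = *qos;
}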
diff --git a/net/atm/svc.c b/net/atm/svc.c
index 66e1d9b3e5d..3ba9a45a51a 100644
--- a/net/atm/svc.c
+++ b/net/atm/svc.c
@@ -2,6 +2,7 @@
2 2
3/* Written 1995-2000 by Werner Almesberger, EPFL LRC/ICA */ 3/* Written 1995-2000 by Werner Almesberger, EPFL LRC/ICA */
4 4
5#define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__
5 6
6#include <linux/string.h> 7#include <linux/string.h>
7#include <linux/net.h> /* struct socket, struct proto_ops */ 8#include <linux/net.h> /* struct socket, struct proto_ops */
@@ -18,14 +19,15 @@
18#include <linux/atmdev.h> 19#include <linux/atmdev.h>
19#include <linux/bitops.h> 20#include <linux/bitops.h>
20#include <net/sock.h> /* for sock_no_* */ 21#include <net/sock.h> /* for sock_no_* */
21#include <asm/uaccess.h> 22#include <linux/uaccess.h>
22 23
23#include "resources.h" 24#include "resources.h"
24#include "common.h" /* common for PVCs and SVCs */ 25#include "common.h" /* common for PVCs and SVCs */
25#include "signaling.h" 26#include "signaling.h"
26#include "addr.h" 27#include "addr.h"
27 28
28static int svc_create(struct net *net, struct socket *sock, int protocol, int kern); 29static int svc_create(struct net *net, struct socket *sock, int protocol,
30 int kern);
29 31
30/* 32/*
31 * Note: since all this is still nicely synchronized with the signaling demon, 33 * Note: since all this is still nicely synchronized with the signaling demon,
@@ -34,25 +36,25 @@ static int svc_create(struct net *net, struct socket *sock, int protocol, int ke
34 */ 36 */
35 37
36 38
37static int svc_shutdown(struct socket *sock,int how) 39static int svc_shutdown(struct socket *sock, int how)
38{ 40{
39 return 0; 41 return 0;
40} 42}
41 43
42
43static void svc_disconnect(struct atm_vcc *vcc) 44static void svc_disconnect(struct atm_vcc *vcc)
44{ 45{
45 DEFINE_WAIT(wait); 46 DEFINE_WAIT(wait);
46 struct sk_buff *skb; 47 struct sk_buff *skb;
47 struct sock *sk = sk_atm(vcc); 48 struct sock *sk = sk_atm(vcc);
48 49
49 pr_debug("svc_disconnect %p\n",vcc); 50 pr_debug("%p\n", vcc);
50 if (test_bit(ATM_VF_REGIS,&vcc->flags)) { 51 if (test_bit(ATM_VF_REGIS, &vcc->flags)) {
51 prepare_to_wait(sk->sk_sleep, &wait, TASK_UNINTERRUPTIBLE); 52 prepare_to_wait(sk->sk_sleep, &wait, TASK_UNINTERRUPTIBLE);
52 sigd_enq(vcc,as_close,NULL,NULL,NULL); 53 sigd_enq(vcc, as_close, NULL, NULL, NULL);
53 while (!test_bit(ATM_VF_RELEASED,&vcc->flags) && sigd) { 54 while (!test_bit(ATM_VF_RELEASED, &vcc->flags) && sigd) {
54 schedule(); 55 schedule();
55 prepare_to_wait(sk->sk_sleep, &wait, TASK_UNINTERRUPTIBLE); 56 prepare_to_wait(sk->sk_sleep, &wait,
57 TASK_UNINTERRUPTIBLE);
56 } 58 }
57 finish_wait(sk->sk_sleep, &wait); 59 finish_wait(sk->sk_sleep, &wait);
58 } 60 }
@@ -61,35 +63,35 @@ static void svc_disconnect(struct atm_vcc *vcc)
61 while ((skb = skb_dequeue(&sk->sk_receive_queue)) != NULL) { 63 while ((skb = skb_dequeue(&sk->sk_receive_queue)) != NULL) {
62 atm_return(vcc, skb->truesize); 64 atm_return(vcc, skb->truesize);
63 pr_debug("LISTEN REL\n"); 65 pr_debug("LISTEN REL\n");
64 sigd_enq2(NULL,as_reject,vcc,NULL,NULL,&vcc->qos,0); 66 sigd_enq2(NULL, as_reject, vcc, NULL, NULL, &vcc->qos, 0);
65 dev_kfree_skb(skb); 67 dev_kfree_skb(skb);
66 } 68 }
67 clear_bit(ATM_VF_REGIS, &vcc->flags); 69 clear_bit(ATM_VF_REGIS, &vcc->flags);
68 /* ... may retry later */ 70 /* ... may retry later */
69} 71}
70 72
71
72static int svc_release(struct socket *sock) 73static int svc_release(struct socket *sock)
73{ 74{
74 struct sock *sk = sock->sk; 75 struct sock *sk = sock->sk;
75 struct atm_vcc *vcc; 76 struct atm_vcc *vcc;
76 77
77 if (sk) { 78 if (sk) {
78 vcc = ATM_SD(sock); 79 vcc = ATM_SD(sock);
79 pr_debug("svc_release %p\n", vcc); 80 pr_debug("%p\n", vcc);
80 clear_bit(ATM_VF_READY, &vcc->flags); 81 clear_bit(ATM_VF_READY, &vcc->flags);
81 /* VCC pointer is used as a reference, so we must not free it 82 /*
82 (thereby subjecting it to re-use) before all pending connections 83 * VCC pointer is used as a reference,
83 are closed */ 84 * so we must not free it (thereby subjecting it to re-use)
85 * before all pending connections are closed
86 */
84 svc_disconnect(vcc); 87 svc_disconnect(vcc);
85 vcc_release(sock); 88 vcc_release(sock);
86 } 89 }
87 return 0; 90 return 0;
88} 91}
89 92
90 93static int svc_bind(struct socket *sock, struct sockaddr *sockaddr,
91static int svc_bind(struct socket *sock,struct sockaddr *sockaddr, 94 int sockaddr_len)
92 int sockaddr_len)
93{ 95{
94 DEFINE_WAIT(wait); 96 DEFINE_WAIT(wait);
95 struct sock *sk = sock->sk; 97 struct sock *sk = sock->sk;
@@ -114,38 +116,37 @@ static int svc_bind(struct socket *sock,struct sockaddr *sockaddr,
114 error = -EAFNOSUPPORT; 116 error = -EAFNOSUPPORT;
115 goto out; 117 goto out;
116 } 118 }
117 clear_bit(ATM_VF_BOUND,&vcc->flags); 119 clear_bit(ATM_VF_BOUND, &vcc->flags);
118 /* failing rebind will kill old binding */ 120 /* failing rebind will kill old binding */
119 /* @@@ check memory (de)allocation on rebind */ 121 /* @@@ check memory (de)allocation on rebind */
120 if (!test_bit(ATM_VF_HASQOS,&vcc->flags)) { 122 if (!test_bit(ATM_VF_HASQOS, &vcc->flags)) {
121 error = -EBADFD; 123 error = -EBADFD;
122 goto out; 124 goto out;
123 } 125 }
124 vcc->local = *addr; 126 vcc->local = *addr;
125 set_bit(ATM_VF_WAITING, &vcc->flags); 127 set_bit(ATM_VF_WAITING, &vcc->flags);
126 prepare_to_wait(sk->sk_sleep, &wait, TASK_UNINTERRUPTIBLE); 128 prepare_to_wait(sk->sk_sleep, &wait, TASK_UNINTERRUPTIBLE);
127 sigd_enq(vcc,as_bind,NULL,NULL,&vcc->local); 129 sigd_enq(vcc, as_bind, NULL, NULL, &vcc->local);
128 while (test_bit(ATM_VF_WAITING, &vcc->flags) && sigd) { 130 while (test_bit(ATM_VF_WAITING, &vcc->flags) && sigd) {
129 schedule(); 131 schedule();
130 prepare_to_wait(sk->sk_sleep, &wait, TASK_UNINTERRUPTIBLE); 132 prepare_to_wait(sk->sk_sleep, &wait, TASK_UNINTERRUPTIBLE);
131 } 133 }
132 finish_wait(sk->sk_sleep, &wait); 134 finish_wait(sk->sk_sleep, &wait);
133 clear_bit(ATM_VF_REGIS,&vcc->flags); /* doesn't count */ 135 clear_bit(ATM_VF_REGIS, &vcc->flags); /* doesn't count */
134 if (!sigd) { 136 if (!sigd) {
135 error = -EUNATCH; 137 error = -EUNATCH;
136 goto out; 138 goto out;
137 } 139 }
138 if (!sk->sk_err) 140 if (!sk->sk_err)
139 set_bit(ATM_VF_BOUND,&vcc->flags); 141 set_bit(ATM_VF_BOUND, &vcc->flags);
140 error = -sk->sk_err; 142 error = -sk->sk_err;
141out: 143out:
142 release_sock(sk); 144 release_sock(sk);
143 return error; 145 return error;
144} 146}
145 147
146 148static int svc_connect(struct socket *sock, struct sockaddr *sockaddr,
147static int svc_connect(struct socket *sock,struct sockaddr *sockaddr, 149 int sockaddr_len, int flags)
148 int sockaddr_len,int flags)
149{ 150{
150 DEFINE_WAIT(wait); 151 DEFINE_WAIT(wait);
151 struct sock *sk = sock->sk; 152 struct sock *sk = sock->sk;
@@ -153,7 +154,7 @@ static int svc_connect(struct socket *sock,struct sockaddr *sockaddr,
153 struct atm_vcc *vcc = ATM_SD(sock); 154 struct atm_vcc *vcc = ATM_SD(sock);
154 int error; 155 int error;
155 156
156 pr_debug("svc_connect %p\n",vcc); 157 pr_debug("%p\n", vcc);
157 lock_sock(sk); 158 lock_sock(sk);
158 if (sockaddr_len != sizeof(struct sockaddr_atmsvc)) { 159 if (sockaddr_len != sizeof(struct sockaddr_atmsvc)) {
159 error = -EINVAL; 160 error = -EINVAL;
@@ -201,7 +202,7 @@ static int svc_connect(struct socket *sock,struct sockaddr *sockaddr,
201 vcc->remote = *addr; 202 vcc->remote = *addr;
202 set_bit(ATM_VF_WAITING, &vcc->flags); 203 set_bit(ATM_VF_WAITING, &vcc->flags);
203 prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE); 204 prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
204 sigd_enq(vcc,as_connect,NULL,NULL,&vcc->remote); 205 sigd_enq(vcc, as_connect, NULL, NULL, &vcc->remote);
205 if (flags & O_NONBLOCK) { 206 if (flags & O_NONBLOCK) {
206 finish_wait(sk->sk_sleep, &wait); 207 finish_wait(sk->sk_sleep, &wait);
207 sock->state = SS_CONNECTING; 208 sock->state = SS_CONNECTING;
@@ -212,7 +213,8 @@ static int svc_connect(struct socket *sock,struct sockaddr *sockaddr,
212 while (test_bit(ATM_VF_WAITING, &vcc->flags) && sigd) { 213 while (test_bit(ATM_VF_WAITING, &vcc->flags) && sigd) {
213 schedule(); 214 schedule();
214 if (!signal_pending(current)) { 215 if (!signal_pending(current)) {
215 prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE); 216 prepare_to_wait(sk->sk_sleep, &wait,
217 TASK_INTERRUPTIBLE);
216 continue; 218 continue;
217 } 219 }
218 pr_debug("*ABORT*\n"); 220 pr_debug("*ABORT*\n");
@@ -228,20 +230,22 @@ static int svc_connect(struct socket *sock,struct sockaddr *sockaddr,
228 * Kernel <--okay---- Demon 230 * Kernel <--okay---- Demon
229 * Kernel <--close--- Demon 231 * Kernel <--close--- Demon
230 */ 232 */
231 sigd_enq(vcc,as_close,NULL,NULL,NULL); 233 sigd_enq(vcc, as_close, NULL, NULL, NULL);
232 while (test_bit(ATM_VF_WAITING, &vcc->flags) && sigd) { 234 while (test_bit(ATM_VF_WAITING, &vcc->flags) && sigd) {
233 prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE); 235 prepare_to_wait(sk->sk_sleep, &wait,
236 TASK_INTERRUPTIBLE);
234 schedule(); 237 schedule();
235 } 238 }
236 if (!sk->sk_err) 239 if (!sk->sk_err)
237 while (!test_bit(ATM_VF_RELEASED,&vcc->flags) 240 while (!test_bit(ATM_VF_RELEASED, &vcc->flags) &&
238 && sigd) { 241 sigd) {
239 prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE); 242 prepare_to_wait(sk->sk_sleep, &wait,
243 TASK_INTERRUPTIBLE);
240 schedule(); 244 schedule();
241 } 245 }
242 clear_bit(ATM_VF_REGIS,&vcc->flags); 246 clear_bit(ATM_VF_REGIS, &vcc->flags);
243 clear_bit(ATM_VF_RELEASED,&vcc->flags); 247 clear_bit(ATM_VF_RELEASED, &vcc->flags);
244 clear_bit(ATM_VF_CLOSE,&vcc->flags); 248 clear_bit(ATM_VF_CLOSE, &vcc->flags);
245 /* we're gone now but may connect later */ 249 /* we're gone now but may connect later */
246 error = -EINTR; 250 error = -EINTR;
247 break; 251 break;
@@ -269,37 +273,37 @@ static int svc_connect(struct socket *sock,struct sockaddr *sockaddr,
269/* 273/*
270 * #endif 274 * #endif
271 */ 275 */
272 if (!(error = vcc_connect(sock, vcc->itf, vcc->vpi, vcc->vci))) 276 error = vcc_connect(sock, vcc->itf, vcc->vpi, vcc->vci);
277 if (!error)
273 sock->state = SS_CONNECTED; 278 sock->state = SS_CONNECTED;
274 else 279 else
275 (void) svc_disconnect(vcc); 280 (void)svc_disconnect(vcc);
276out: 281out:
277 release_sock(sk); 282 release_sock(sk);
278 return error; 283 return error;
279} 284}
280 285
281 286static int svc_listen(struct socket *sock, int backlog)
282static int svc_listen(struct socket *sock,int backlog)
283{ 287{
284 DEFINE_WAIT(wait); 288 DEFINE_WAIT(wait);
285 struct sock *sk = sock->sk; 289 struct sock *sk = sock->sk;
286 struct atm_vcc *vcc = ATM_SD(sock); 290 struct atm_vcc *vcc = ATM_SD(sock);
287 int error; 291 int error;
288 292
289 pr_debug("svc_listen %p\n",vcc); 293 pr_debug("%p\n", vcc);
290 lock_sock(sk); 294 lock_sock(sk);
291 /* let server handle listen on unbound sockets */ 295 /* let server handle listen on unbound sockets */
292 if (test_bit(ATM_VF_SESSION,&vcc->flags)) { 296 if (test_bit(ATM_VF_SESSION, &vcc->flags)) {
293 error = -EINVAL; 297 error = -EINVAL;
294 goto out; 298 goto out;
295 } 299 }
296 if (test_bit(ATM_VF_LISTEN, &vcc->flags)) { 300 if (test_bit(ATM_VF_LISTEN, &vcc->flags)) {
297 error = -EADDRINUSE; 301 error = -EADDRINUSE;
298 goto out; 302 goto out;
299 } 303 }
300 set_bit(ATM_VF_WAITING, &vcc->flags); 304 set_bit(ATM_VF_WAITING, &vcc->flags);
301 prepare_to_wait(sk->sk_sleep, &wait, TASK_UNINTERRUPTIBLE); 305 prepare_to_wait(sk->sk_sleep, &wait, TASK_UNINTERRUPTIBLE);
302 sigd_enq(vcc,as_listen,NULL,NULL,&vcc->local); 306 sigd_enq(vcc, as_listen, NULL, NULL, &vcc->local);
303 while (test_bit(ATM_VF_WAITING, &vcc->flags) && sigd) { 307 while (test_bit(ATM_VF_WAITING, &vcc->flags) && sigd) {
304 schedule(); 308 schedule();
305 prepare_to_wait(sk->sk_sleep, &wait, TASK_UNINTERRUPTIBLE); 309 prepare_to_wait(sk->sk_sleep, &wait, TASK_UNINTERRUPTIBLE);
@@ -309,7 +313,7 @@ static int svc_listen(struct socket *sock,int backlog)
309 error = -EUNATCH; 313 error = -EUNATCH;
310 goto out; 314 goto out;
311 } 315 }
312 set_bit(ATM_VF_LISTEN,&vcc->flags); 316 set_bit(ATM_VF_LISTEN, &vcc->flags);
313 vcc_insert_socket(sk); 317 vcc_insert_socket(sk);
314 sk->sk_max_ack_backlog = backlog > 0 ? backlog : ATM_BACKLOG_DEFAULT; 318 sk->sk_max_ack_backlog = backlog > 0 ? backlog : ATM_BACKLOG_DEFAULT;
315 error = -sk->sk_err; 319 error = -sk->sk_err;
@@ -318,8 +322,7 @@ out:
318 return error; 322 return error;
319} 323}
320 324
321 325static int svc_accept(struct socket *sock, struct socket *newsock, int flags)
322static int svc_accept(struct socket *sock,struct socket *newsock,int flags)
323{ 326{
324 struct sock *sk = sock->sk; 327 struct sock *sk = sock->sk;
325 struct sk_buff *skb; 328 struct sk_buff *skb;
@@ -336,15 +339,16 @@ static int svc_accept(struct socket *sock,struct socket *newsock,int flags)
336 339
337 new_vcc = ATM_SD(newsock); 340 new_vcc = ATM_SD(newsock);
338 341
339 pr_debug("svc_accept %p -> %p\n",old_vcc,new_vcc); 342 pr_debug("%p -> %p\n", old_vcc, new_vcc);
340 while (1) { 343 while (1) {
341 DEFINE_WAIT(wait); 344 DEFINE_WAIT(wait);
342 345
343 prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE); 346 prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
344 while (!(skb = skb_dequeue(&sk->sk_receive_queue)) && 347 while (!(skb = skb_dequeue(&sk->sk_receive_queue)) &&
345 sigd) { 348 sigd) {
346 if (test_bit(ATM_VF_RELEASED,&old_vcc->flags)) break; 349 if (test_bit(ATM_VF_RELEASED, &old_vcc->flags))
347 if (test_bit(ATM_VF_CLOSE,&old_vcc->flags)) { 350 break;
351 if (test_bit(ATM_VF_CLOSE, &old_vcc->flags)) {
348 error = -sk->sk_err; 352 error = -sk->sk_err;
349 break; 353 break;
350 } 354 }
@@ -359,7 +363,8 @@ static int svc_accept(struct socket *sock,struct socket *newsock,int flags)
359 error = -ERESTARTSYS; 363 error = -ERESTARTSYS;
360 break; 364 break;
361 } 365 }
362 prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE); 366 prepare_to_wait(sk->sk_sleep, &wait,
367 TASK_INTERRUPTIBLE);
363 } 368 }
364 finish_wait(sk->sk_sleep, &wait); 369 finish_wait(sk->sk_sleep, &wait);
365 if (error) 370 if (error)
@@ -368,31 +373,34 @@ static int svc_accept(struct socket *sock,struct socket *newsock,int flags)
368 error = -EUNATCH; 373 error = -EUNATCH;
369 goto out; 374 goto out;
370 } 375 }
371 msg = (struct atmsvc_msg *) skb->data; 376 msg = (struct atmsvc_msg *)skb->data;
372 new_vcc->qos = msg->qos; 377 new_vcc->qos = msg->qos;
373 set_bit(ATM_VF_HASQOS,&new_vcc->flags); 378 set_bit(ATM_VF_HASQOS, &new_vcc->flags);
374 new_vcc->remote = msg->svc; 379 new_vcc->remote = msg->svc;
375 new_vcc->local = msg->local; 380 new_vcc->local = msg->local;
376 new_vcc->sap = msg->sap; 381 new_vcc->sap = msg->sap;
377 error = vcc_connect(newsock, msg->pvc.sap_addr.itf, 382 error = vcc_connect(newsock, msg->pvc.sap_addr.itf,
378 msg->pvc.sap_addr.vpi, msg->pvc.sap_addr.vci); 383 msg->pvc.sap_addr.vpi,
384 msg->pvc.sap_addr.vci);
379 dev_kfree_skb(skb); 385 dev_kfree_skb(skb);
380 sk->sk_ack_backlog--; 386 sk->sk_ack_backlog--;
381 if (error) { 387 if (error) {
382 sigd_enq2(NULL,as_reject,old_vcc,NULL,NULL, 388 sigd_enq2(NULL, as_reject, old_vcc, NULL, NULL,
383 &old_vcc->qos,error); 389 &old_vcc->qos, error);
384 error = error == -EAGAIN ? -EBUSY : error; 390 error = error == -EAGAIN ? -EBUSY : error;
385 goto out; 391 goto out;
386 } 392 }
387 /* wait should be short, so we ignore the non-blocking flag */ 393 /* wait should be short, so we ignore the non-blocking flag */
388 set_bit(ATM_VF_WAITING, &new_vcc->flags); 394 set_bit(ATM_VF_WAITING, &new_vcc->flags);
389 prepare_to_wait(sk_atm(new_vcc)->sk_sleep, &wait, TASK_UNINTERRUPTIBLE); 395 prepare_to_wait(sk_atm(new_vcc)->sk_sleep, &wait,
390 sigd_enq(new_vcc,as_accept,old_vcc,NULL,NULL); 396 TASK_UNINTERRUPTIBLE);
397 sigd_enq(new_vcc, as_accept, old_vcc, NULL, NULL);
391 while (test_bit(ATM_VF_WAITING, &new_vcc->flags) && sigd) { 398 while (test_bit(ATM_VF_WAITING, &new_vcc->flags) && sigd) {
392 release_sock(sk); 399 release_sock(sk);
393 schedule(); 400 schedule();
394 lock_sock(sk); 401 lock_sock(sk);
395 prepare_to_wait(sk_atm(new_vcc)->sk_sleep, &wait, TASK_UNINTERRUPTIBLE); 402 prepare_to_wait(sk_atm(new_vcc)->sk_sleep, &wait,
403 TASK_UNINTERRUPTIBLE);
396 } 404 }
397 finish_wait(sk_atm(new_vcc)->sk_sleep, &wait); 405 finish_wait(sk_atm(new_vcc)->sk_sleep, &wait);
398 if (!sigd) { 406 if (!sigd) {
@@ -412,39 +420,37 @@ out:
412 return error; 420 return error;
413} 421}
414 422
415 423static int svc_getname(struct socket *sock, struct sockaddr *sockaddr,
416static int svc_getname(struct socket *sock,struct sockaddr *sockaddr, 424 int *sockaddr_len, int peer)
417 int *sockaddr_len,int peer)
418{ 425{
419 struct sockaddr_atmsvc *addr; 426 struct sockaddr_atmsvc *addr;
420 427
421 *sockaddr_len = sizeof(struct sockaddr_atmsvc); 428 *sockaddr_len = sizeof(struct sockaddr_atmsvc);
422 addr = (struct sockaddr_atmsvc *) sockaddr; 429 addr = (struct sockaddr_atmsvc *) sockaddr;
423 memcpy(addr,peer ? &ATM_SD(sock)->remote : &ATM_SD(sock)->local, 430 memcpy(addr, peer ? &ATM_SD(sock)->remote : &ATM_SD(sock)->local,
424 sizeof(struct sockaddr_atmsvc)); 431 sizeof(struct sockaddr_atmsvc));
425 return 0; 432 return 0;
426} 433}
427 434
428 435int svc_change_qos(struct atm_vcc *vcc, struct atm_qos *qos)
429int svc_change_qos(struct atm_vcc *vcc,struct atm_qos *qos)
430{ 436{
431 struct sock *sk = sk_atm(vcc); 437 struct sock *sk = sk_atm(vcc);
432 DEFINE_WAIT(wait); 438 DEFINE_WAIT(wait);
433 439
434 set_bit(ATM_VF_WAITING, &vcc->flags); 440 set_bit(ATM_VF_WAITING, &vcc->flags);
435 prepare_to_wait(sk->sk_sleep, &wait, TASK_UNINTERRUPTIBLE); 441 prepare_to_wait(sk->sk_sleep, &wait, TASK_UNINTERRUPTIBLE);
436 sigd_enq2(vcc,as_modify,NULL,NULL,&vcc->local,qos,0); 442 sigd_enq2(vcc, as_modify, NULL, NULL, &vcc->local, qos, 0);
437 while (test_bit(ATM_VF_WAITING, &vcc->flags) && 443 while (test_bit(ATM_VF_WAITING, &vcc->flags) &&
438 !test_bit(ATM_VF_RELEASED, &vcc->flags) && sigd) { 444 !test_bit(ATM_VF_RELEASED, &vcc->flags) && sigd) {
439 schedule(); 445 schedule();
440 prepare_to_wait(sk->sk_sleep, &wait, TASK_UNINTERRUPTIBLE); 446 prepare_to_wait(sk->sk_sleep, &wait, TASK_UNINTERRUPTIBLE);
441 } 447 }
442 finish_wait(sk->sk_sleep, &wait); 448 finish_wait(sk->sk_sleep, &wait);
443 if (!sigd) return -EUNATCH; 449 if (!sigd)
450 return -EUNATCH;
444 return -sk->sk_err; 451 return -sk->sk_err;
445} 452}
446 453
447
448static int svc_setsockopt(struct socket *sock, int level, int optname, 454static int svc_setsockopt(struct socket *sock, int level, int optname,
449 char __user *optval, unsigned int optlen) 455 char __user *optval, unsigned int optlen)
450{ 456{
@@ -454,37 +460,35 @@ static int svc_setsockopt(struct socket *sock, int level, int optname,
454 460
455 lock_sock(sk); 461 lock_sock(sk);
456 switch (optname) { 462 switch (optname) {
457 case SO_ATMSAP: 463 case SO_ATMSAP:
458 if (level != SOL_ATM || optlen != sizeof(struct atm_sap)) { 464 if (level != SOL_ATM || optlen != sizeof(struct atm_sap)) {
459 error = -EINVAL; 465 error = -EINVAL;
460 goto out; 466 goto out;
461 } 467 }
462 if (copy_from_user(&vcc->sap, optval, optlen)) { 468 if (copy_from_user(&vcc->sap, optval, optlen)) {
463 error = -EFAULT; 469 error = -EFAULT;
464 goto out; 470 goto out;
465 } 471 }
466 set_bit(ATM_VF_HASSAP, &vcc->flags); 472 set_bit(ATM_VF_HASSAP, &vcc->flags);
467 break; 473 break;
468 case SO_MULTIPOINT: 474 case SO_MULTIPOINT:
469 if (level != SOL_ATM || optlen != sizeof(int)) { 475 if (level != SOL_ATM || optlen != sizeof(int)) {
470 error = -EINVAL; 476 error = -EINVAL;
471 goto out; 477 goto out;
472 } 478 }
473 if (get_user(value, (int __user *) optval)) { 479 if (get_user(value, (int __user *)optval)) {
474 error = -EFAULT; 480 error = -EFAULT;
475 goto out; 481 goto out;
476 } 482 }
477 if (value == 1) { 483 if (value == 1)
478 set_bit(ATM_VF_SESSION, &vcc->flags); 484 set_bit(ATM_VF_SESSION, &vcc->flags);
479 } else if (value == 0) { 485 else if (value == 0)
480 clear_bit(ATM_VF_SESSION, &vcc->flags); 486 clear_bit(ATM_VF_SESSION, &vcc->flags);
481 } else { 487 else
482 error = -EINVAL; 488 error = -EINVAL;
483 } 489 break;
484 break; 490 default:
485 default: 491 error = vcc_setsockopt(sock, level, optname, optval, optlen);
486 error = vcc_setsockopt(sock, level, optname,
487 optval, optlen);
488 } 492 }
489 493
490out: 494out:
@@ -492,9 +496,8 @@ out:
492 return error; 496 return error;
493} 497}
494 498
495 499static int svc_getsockopt(struct socket *sock, int level, int optname,
496static int svc_getsockopt(struct socket *sock,int level,int optname, 500 char __user *optval, int __user *optlen)
497 char __user *optval,int __user *optlen)
498{ 501{
499 struct sock *sk = sock->sk; 502 struct sock *sk = sock->sk;
500 int error = 0, len; 503 int error = 0, len;
@@ -521,7 +524,6 @@ out:
521 return error; 524 return error;
522} 525}
523 526
524
525static int svc_addparty(struct socket *sock, struct sockaddr *sockaddr, 527static int svc_addparty(struct socket *sock, struct sockaddr *sockaddr,
526 int sockaddr_len, int flags) 528 int sockaddr_len, int flags)
527{ 529{
@@ -540,7 +542,7 @@ static int svc_addparty(struct socket *sock, struct sockaddr *sockaddr,
540 error = -EINPROGRESS; 542 error = -EINPROGRESS;
541 goto out; 543 goto out;
542 } 544 }
543 pr_debug("svc_addparty added wait queue\n"); 545 pr_debug("added wait queue\n");
544 while (test_bit(ATM_VF_WAITING, &vcc->flags) && sigd) { 546 while (test_bit(ATM_VF_WAITING, &vcc->flags) && sigd) {
545 schedule(); 547 schedule();
546 prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE); 548 prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
@@ -552,7 +554,6 @@ out:
552 return error; 554 return error;
553} 555}
554 556
555
556static int svc_dropparty(struct socket *sock, int ep_ref) 557static int svc_dropparty(struct socket *sock, int ep_ref)
557{ 558{
558 DEFINE_WAIT(wait); 559 DEFINE_WAIT(wait);
@@ -579,7 +580,6 @@ out:
579 return error; 580 return error;
580} 581}
581 582
582
583static int svc_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) 583static int svc_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
584{ 584{
585 int error, ep_ref; 585 int error, ep_ref;
@@ -587,29 +587,31 @@ static int svc_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
587 struct atm_vcc *vcc = ATM_SD(sock); 587 struct atm_vcc *vcc = ATM_SD(sock);
588 588
589 switch (cmd) { 589 switch (cmd) {
590 case ATM_ADDPARTY: 590 case ATM_ADDPARTY:
591 if (!test_bit(ATM_VF_SESSION, &vcc->flags)) 591 if (!test_bit(ATM_VF_SESSION, &vcc->flags))
592 return -EINVAL; 592 return -EINVAL;
593 if (copy_from_user(&sa, (void __user *) arg, sizeof(sa))) 593 if (copy_from_user(&sa, (void __user *) arg, sizeof(sa)))
594 return -EFAULT; 594 return -EFAULT;
595 error = svc_addparty(sock, (struct sockaddr *) &sa, sizeof(sa), 0); 595 error = svc_addparty(sock, (struct sockaddr *)&sa, sizeof(sa),
596 break; 596 0);
597 case ATM_DROPPARTY: 597 break;
598 if (!test_bit(ATM_VF_SESSION, &vcc->flags)) 598 case ATM_DROPPARTY:
599 return -EINVAL; 599 if (!test_bit(ATM_VF_SESSION, &vcc->flags))
600 if (copy_from_user(&ep_ref, (void __user *) arg, sizeof(int))) 600 return -EINVAL;
601 return -EFAULT; 601 if (copy_from_user(&ep_ref, (void __user *) arg, sizeof(int)))
602 error = svc_dropparty(sock, ep_ref); 602 return -EFAULT;
603 break; 603 error = svc_dropparty(sock, ep_ref);
604 default: 604 break;
605 error = vcc_ioctl(sock, cmd, arg); 605 default:
606 error = vcc_ioctl(sock, cmd, arg);
606 } 607 }
607 608
608 return error; 609 return error;
609} 610}
610 611
611#ifdef CONFIG_COMPAT 612#ifdef CONFIG_COMPAT
612static int svc_compat_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) 613static int svc_compat_ioctl(struct socket *sock, unsigned int cmd,
614 unsigned long arg)
613{ 615{
614 /* The definition of ATM_ADDPARTY uses the size of struct atm_iobuf. 616 /* The definition of ATM_ADDPARTY uses the size of struct atm_iobuf.
615 But actually it takes a struct sockaddr_atmsvc, which doesn't need 617 But actually it takes a struct sockaddr_atmsvc, which doesn't need
@@ -660,13 +662,13 @@ static int svc_create(struct net *net, struct socket *sock, int protocol,
660 662
661 sock->ops = &svc_proto_ops; 663 sock->ops = &svc_proto_ops;
662 error = vcc_create(net, sock, protocol, AF_ATMSVC); 664 error = vcc_create(net, sock, protocol, AF_ATMSVC);
663 if (error) return error; 665 if (error)
666 return error;
664 ATM_SD(sock)->local.sas_family = AF_ATMSVC; 667 ATM_SD(sock)->local.sas_family = AF_ATMSVC;
665 ATM_SD(sock)->remote.sas_family = AF_ATMSVC; 668 ATM_SD(sock)->remote.sas_family = AF_ATMSVC;
666 return 0; 669 return 0;
667} 670}
668 671
669
670static const struct net_proto_family svc_family_ops = { 672static const struct net_proto_family svc_family_ops = {
671 .family = PF_ATMSVC, 673 .family = PF_ATMSVC,
672 .create = svc_create, 674 .create = svc_create,
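The svc.c hunks above are again line-length and comma-spacing fixes wrapped around an unchanged wait pattern: set ATM_VF_WAITING, hand a message to the signaling daemon with sigd_enq(), then loop in prepare_to_wait()/schedule() until the daemon clears the flag or goes away. A stripped-down, hypothetical sketch of that loop (my_waitq, my_flags and MY_WAITING are invented names; the real code waits on sk->sk_sleep and tests vcc->flags and sigd):

#include <linux/wait.h>
#include <linux/sched.h>
#include <linux/bitops.h>

static DECLARE_WAIT_QUEUE_HEAD(my_waitq);
static unsigned long my_flags;
#define MY_WAITING 0

static void example_wait_for_reply(void)
{
	DEFINE_WAIT(wait);

	set_bit(MY_WAITING, &my_flags);
	/* ...send the request whose completion handler clears MY_WAITING... */
	prepare_to_wait(&my_waitq, &wait, TASK_UNINTERRUPTIBLE);
	while (test_bit(MY_WAITING, &my_flags)) {
		schedule();
		prepare_to_wait(&my_waitq, &wait, TASK_UNINTERRUPTIBLE);
	}
	finish_wait(&my_waitq, &wait);
}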
diff --git a/net/bluetooth/cmtp/capi.c b/net/bluetooth/cmtp/capi.c
index 97f8d68d574..3487cfe74ae 100644
--- a/net/bluetooth/cmtp/capi.c
+++ b/net/bluetooth/cmtp/capi.c
@@ -21,7 +21,8 @@
21*/ 21*/
22 22
23#include <linux/module.h> 23#include <linux/module.h>
24 24#include <linux/proc_fs.h>
25#include <linux/seq_file.h>
25#include <linux/types.h> 26#include <linux/types.h>
26#include <linux/errno.h> 27#include <linux/errno.h>
27#include <linux/kernel.h> 28#include <linux/kernel.h>
@@ -516,33 +517,37 @@ static char *cmtp_procinfo(struct capi_ctr *ctrl)
516 return "CAPI Message Transport Protocol"; 517 return "CAPI Message Transport Protocol";
517} 518}
518 519
519static int cmtp_ctr_read_proc(char *page, char **start, off_t off, int count, int *eof, struct capi_ctr *ctrl) 520static int cmtp_proc_show(struct seq_file *m, void *v)
520{ 521{
522 struct capi_ctr *ctrl = m->private;
521 struct cmtp_session *session = ctrl->driverdata; 523 struct cmtp_session *session = ctrl->driverdata;
522 struct cmtp_application *app; 524 struct cmtp_application *app;
523 struct list_head *p, *n; 525 struct list_head *p, *n;
524 int len = 0;
525 526
526 len += sprintf(page + len, "%s\n\n", cmtp_procinfo(ctrl)); 527 seq_printf(m, "%s\n\n", cmtp_procinfo(ctrl));
527 len += sprintf(page + len, "addr %s\n", session->name); 528 seq_printf(m, "addr %s\n", session->name);
528 len += sprintf(page + len, "ctrl %d\n", session->num); 529 seq_printf(m, "ctrl %d\n", session->num);
529 530
530 list_for_each_safe(p, n, &session->applications) { 531 list_for_each_safe(p, n, &session->applications) {
531 app = list_entry(p, struct cmtp_application, list); 532 app = list_entry(p, struct cmtp_application, list);
532 len += sprintf(page + len, "appl %d -> %d\n", app->appl, app->mapping); 533 seq_printf(m, "appl %d -> %d\n", app->appl, app->mapping);
533 } 534 }
534 535
535 if (off + count >= len) 536 return 0;
536 *eof = 1; 537}
537
538 if (len < off)
539 return 0;
540
541 *start = page + off;
542 538
543 return ((count < len - off) ? count : len - off); 539static int cmtp_proc_open(struct inode *inode, struct file *file)
540{
541 return single_open(file, cmtp_proc_show, PDE(inode)->data);
544} 542}
545 543
544static const struct file_operations cmtp_proc_fops = {
545 .owner = THIS_MODULE,
546 .open = cmtp_proc_open,
547 .read = seq_read,
548 .llseek = seq_lseek,
549 .release = single_release,
550};
546 551
547int cmtp_attach_device(struct cmtp_session *session) 552int cmtp_attach_device(struct cmtp_session *session)
548{ 553{
@@ -582,7 +587,7 @@ int cmtp_attach_device(struct cmtp_session *session)
582 session->ctrl.send_message = cmtp_send_message; 587 session->ctrl.send_message = cmtp_send_message;
583 588
584 session->ctrl.procinfo = cmtp_procinfo; 589 session->ctrl.procinfo = cmtp_procinfo;
585 session->ctrl.ctr_read_proc = cmtp_ctr_read_proc; 590 session->ctrl.proc_fops = &cmtp_proc_fops;
586 591
587 if (attach_capi_ctr(&session->ctrl) < 0) { 592 if (attach_capi_ctr(&session->ctrl) < 0) {
588 BT_ERR("Can't attach new controller"); 593 BT_ERR("Can't attach new controller");
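The cmtp change above is the stock conversion from the old ctr_read_proc callback to a seq_file: a *_show() routine prints through seq_printf(), single_open() binds it to the controller handed over via PDE(inode)->data, and a const file_operations with seq_read/seq_lseek/single_release replaces the hand-rolled offset bookkeeping. A generic sketch of the same recipe for a hypothetical proc entry (the example_ names are invented; PDE() is how this kernel generation reached the proc_dir_entry private data):

#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>

static int example_proc_show(struct seq_file *m, void *v)
{
	/* m->private is whatever was passed to single_open() below */
	seq_printf(m, "label %s\n", (const char *)m->private);
	return 0;
}

static int example_proc_open(struct inode *inode, struct file *file)
{
	return single_open(file, example_proc_show, PDE(inode)->data);
}

static const struct file_operations example_proc_fops = {
	.owner		= THIS_MODULE,
	.open		= example_proc_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};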
diff --git a/net/bridge/br_if.c b/net/bridge/br_if.c
index a2cbe61f6e6..7bc0604069c 100644
--- a/net/bridge/br_if.c
+++ b/net/bridge/br_if.c
@@ -467,7 +467,7 @@ int br_del_if(struct net_bridge *br, struct net_device *dev)
467 return 0; 467 return 0;
468} 468}
469 469
470void br_net_exit(struct net *net) 470void __net_exit br_net_exit(struct net *net)
471{ 471{
472 struct net_device *dev; 472 struct net_device *dev;
473 LIST_HEAD(list); 473 LIST_HEAD(list);
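The one-line br_if.c change tags br_net_exit() with __net_exit, the section annotation for per-network-namespace exit handlers; when CONFIG_NET_NS is off these annotations fall back to __init/__exit-style sections so the code can be discarded. A hedged sketch of the usual pairing with struct pernet_operations (all example_ names are invented; the bridge code registers its own ops elsewhere):

#include <linux/module.h>
#include <net/net_namespace.h>

static int __net_init example_net_init(struct net *net)
{
	/* per-namespace setup would go here */
	return 0;
}

static void __net_exit example_net_exit(struct net *net)
{
	/* per-namespace teardown would go here */
}

static struct pernet_operations example_net_ops = {
	.init = example_net_init,
	.exit = example_net_exit,
};

/* typically registered from module init with register_pernet_subsys(&example_net_ops) */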
diff --git a/net/can/af_can.c b/net/can/af_can.c
index 51adc4c2b86..702be5a2c95 100644
--- a/net/can/af_can.c
+++ b/net/can/af_can.c
@@ -77,8 +77,8 @@ static int stats_timer __read_mostly = 1;
77module_param(stats_timer, int, S_IRUGO); 77module_param(stats_timer, int, S_IRUGO);
78MODULE_PARM_DESC(stats_timer, "enable timer for statistics (default:on)"); 78MODULE_PARM_DESC(stats_timer, "enable timer for statistics (default:on)");
79 79
80HLIST_HEAD(can_rx_dev_list); 80/* receive filters subscribed for 'all' CAN devices */
81static struct dev_rcv_lists can_rx_alldev_list; 81struct dev_rcv_lists can_rx_alldev_list;
82static DEFINE_SPINLOCK(can_rcvlists_lock); 82static DEFINE_SPINLOCK(can_rcvlists_lock);
83 83
84static struct kmem_cache *rcv_cache __read_mostly; 84static struct kmem_cache *rcv_cache __read_mostly;
@@ -292,28 +292,10 @@ EXPORT_SYMBOL(can_send);
292 292
293static struct dev_rcv_lists *find_dev_rcv_lists(struct net_device *dev) 293static struct dev_rcv_lists *find_dev_rcv_lists(struct net_device *dev)
294{ 294{
295 struct dev_rcv_lists *d = NULL; 295 if (!dev)
296 struct hlist_node *n; 296 return &can_rx_alldev_list;
297 297 else
298 /* 298 return (struct dev_rcv_lists *)dev->ml_priv;
299 * find receive list for this device
300 *
301 * The hlist_for_each_entry*() macros curse through the list
302 * using the pointer variable n and set d to the containing
303 * struct in each list iteration. Therefore, after list
304 * iteration, d is unmodified when the list is empty, and it
305 * points to last list element, when the list is non-empty
306 * but no match in the loop body is found. I.e. d is *not*
307 * NULL when no match is found. We can, however, use the
308 * cursor variable n to decide if a match was found.
309 */
310
311 hlist_for_each_entry_rcu(d, n, &can_rx_dev_list, list) {
312 if (d->dev == dev)
313 break;
314 }
315
316 return n ? d : NULL;
317} 299}
318 300
319/** 301/**
@@ -433,6 +415,9 @@ int can_rx_register(struct net_device *dev, canid_t can_id, canid_t mask,
433 415
434 /* insert new receiver (dev,canid,mask) -> (func,data) */ 416 /* insert new receiver (dev,canid,mask) -> (func,data) */
435 417
418 if (dev && dev->type != ARPHRD_CAN)
419 return -ENODEV;
420
436 r = kmem_cache_alloc(rcv_cache, GFP_KERNEL); 421 r = kmem_cache_alloc(rcv_cache, GFP_KERNEL);
437 if (!r) 422 if (!r)
438 return -ENOMEM; 423 return -ENOMEM;
@@ -468,16 +453,6 @@ int can_rx_register(struct net_device *dev, canid_t can_id, canid_t mask,
468EXPORT_SYMBOL(can_rx_register); 453EXPORT_SYMBOL(can_rx_register);
469 454
470/* 455/*
471 * can_rx_delete_device - rcu callback for dev_rcv_lists structure removal
472 */
473static void can_rx_delete_device(struct rcu_head *rp)
474{
475 struct dev_rcv_lists *d = container_of(rp, struct dev_rcv_lists, rcu);
476
477 kfree(d);
478}
479
480/*
481 * can_rx_delete_receiver - rcu callback for single receiver entry removal 456 * can_rx_delete_receiver - rcu callback for single receiver entry removal
482 */ 457 */
483static void can_rx_delete_receiver(struct rcu_head *rp) 458static void can_rx_delete_receiver(struct rcu_head *rp)
@@ -506,6 +481,9 @@ void can_rx_unregister(struct net_device *dev, canid_t can_id, canid_t mask,
506 struct hlist_node *next; 481 struct hlist_node *next;
507 struct dev_rcv_lists *d; 482 struct dev_rcv_lists *d;
508 483
484 if (dev && dev->type != ARPHRD_CAN)
485 return;
486
509 spin_lock(&can_rcvlists_lock); 487 spin_lock(&can_rcvlists_lock);
510 488
511 d = find_dev_rcv_lists(dev); 489 d = find_dev_rcv_lists(dev);
@@ -541,7 +519,6 @@ void can_rx_unregister(struct net_device *dev, canid_t can_id, canid_t mask,
541 "dev %s, id %03X, mask %03X\n", 519 "dev %s, id %03X, mask %03X\n",
542 DNAME(dev), can_id, mask); 520 DNAME(dev), can_id, mask);
543 r = NULL; 521 r = NULL;
544 d = NULL;
545 goto out; 522 goto out;
546 } 523 }
547 524
@@ -552,10 +529,10 @@ void can_rx_unregister(struct net_device *dev, canid_t can_id, canid_t mask,
552 can_pstats.rcv_entries--; 529 can_pstats.rcv_entries--;
553 530
554 /* remove device structure requested by NETDEV_UNREGISTER */ 531 /* remove device structure requested by NETDEV_UNREGISTER */
555 if (d->remove_on_zero_entries && !d->entries) 532 if (d->remove_on_zero_entries && !d->entries) {
556 hlist_del_rcu(&d->list); 533 kfree(d);
557 else 534 dev->ml_priv = NULL;
558 d = NULL; 535 }
559 536
560 out: 537 out:
561 spin_unlock(&can_rcvlists_lock); 538 spin_unlock(&can_rcvlists_lock);
@@ -563,10 +540,6 @@ void can_rx_unregister(struct net_device *dev, canid_t can_id, canid_t mask,
563 /* schedule the receiver item for deletion */ 540 /* schedule the receiver item for deletion */
564 if (r) 541 if (r)
565 call_rcu(&r->rcu, can_rx_delete_receiver); 542 call_rcu(&r->rcu, can_rx_delete_receiver);
566
567 /* schedule the device structure for deletion */
568 if (d)
569 call_rcu(&d->rcu, can_rx_delete_device);
570} 543}
571EXPORT_SYMBOL(can_rx_unregister); 544EXPORT_SYMBOL(can_rx_unregister);
572 545
@@ -780,48 +753,35 @@ static int can_notifier(struct notifier_block *nb, unsigned long msg,
780 753
781 case NETDEV_REGISTER: 754 case NETDEV_REGISTER:
782 755
783 /* 756 /* create new dev_rcv_lists for this device */
784 * create new dev_rcv_lists for this device
785 *
786 * N.B. zeroing the struct is the correct initialization
787 * for the embedded hlist_head structs.
788 * Another list type, e.g. list_head, would require
789 * explicit initialization.
790 */
791
792 d = kzalloc(sizeof(*d), GFP_KERNEL); 757 d = kzalloc(sizeof(*d), GFP_KERNEL);
793 if (!d) { 758 if (!d) {
794 printk(KERN_ERR 759 printk(KERN_ERR
795 "can: allocation of receive list failed\n"); 760 "can: allocation of receive list failed\n");
796 return NOTIFY_DONE; 761 return NOTIFY_DONE;
797 } 762 }
798 d->dev = dev; 763 BUG_ON(dev->ml_priv);
799 764 dev->ml_priv = d;
800 spin_lock(&can_rcvlists_lock);
801 hlist_add_head_rcu(&d->list, &can_rx_dev_list);
802 spin_unlock(&can_rcvlists_lock);
803 765
804 break; 766 break;
805 767
806 case NETDEV_UNREGISTER: 768 case NETDEV_UNREGISTER:
807 spin_lock(&can_rcvlists_lock); 769 spin_lock(&can_rcvlists_lock);
808 770
809 d = find_dev_rcv_lists(dev); 771 d = dev->ml_priv;
810 if (d) { 772 if (d) {
811 if (d->entries) { 773 if (d->entries)
812 d->remove_on_zero_entries = 1; 774 d->remove_on_zero_entries = 1;
813 d = NULL; 775 else {
814 } else 776 kfree(d);
815 hlist_del_rcu(&d->list); 777 dev->ml_priv = NULL;
778 }
816 } else 779 } else
817 printk(KERN_ERR "can: notifier: receive list not " 780 printk(KERN_ERR "can: notifier: receive list not "
818 "found for dev %s\n", dev->name); 781 "found for dev %s\n", dev->name);
819 782
820 spin_unlock(&can_rcvlists_lock); 783 spin_unlock(&can_rcvlists_lock);
821 784
822 if (d)
823 call_rcu(&d->rcu, can_rx_delete_device);
824
825 break; 785 break;
826 } 786 }
827 787
@@ -853,21 +813,13 @@ static __init int can_init(void)
853{ 813{
854 printk(banner); 814 printk(banner);
855 815
816 memset(&can_rx_alldev_list, 0, sizeof(can_rx_alldev_list));
817
856 rcv_cache = kmem_cache_create("can_receiver", sizeof(struct receiver), 818 rcv_cache = kmem_cache_create("can_receiver", sizeof(struct receiver),
857 0, 0, NULL); 819 0, 0, NULL);
858 if (!rcv_cache) 820 if (!rcv_cache)
859 return -ENOMEM; 821 return -ENOMEM;
860 822
861 /*
862 * Insert can_rx_alldev_list for reception on all devices.
863 * This struct is zero initialized which is correct for the
864 * embedded hlist heads, the dev pointer, and the entries counter.
865 */
866
867 spin_lock(&can_rcvlists_lock);
868 hlist_add_head_rcu(&can_rx_alldev_list.list, &can_rx_dev_list);
869 spin_unlock(&can_rcvlists_lock);
870
871 if (stats_timer) { 823 if (stats_timer) {
872 /* the statistics are updated every second (timer triggered) */ 824 /* the statistics are updated every second (timer triggered) */
873 setup_timer(&can_stattimer, can_stat_update, 0); 825 setup_timer(&can_stattimer, can_stat_update, 0);
@@ -887,8 +839,7 @@ static __init int can_init(void)
887 839
888static __exit void can_exit(void) 840static __exit void can_exit(void)
889{ 841{
890 struct dev_rcv_lists *d; 842 struct net_device *dev;
891 struct hlist_node *n, *next;
892 843
893 if (stats_timer) 844 if (stats_timer)
894 del_timer(&can_stattimer); 845 del_timer(&can_stattimer);
@@ -900,14 +851,19 @@ static __exit void can_exit(void)
900 unregister_netdevice_notifier(&can_netdev_notifier); 851 unregister_netdevice_notifier(&can_netdev_notifier);
901 sock_unregister(PF_CAN); 852 sock_unregister(PF_CAN);
902 853
903 /* remove can_rx_dev_list */ 854 /* remove created dev_rcv_lists from still registered CAN devices */
904 spin_lock(&can_rcvlists_lock); 855 rcu_read_lock();
905 hlist_del(&can_rx_alldev_list.list); 856 for_each_netdev_rcu(&init_net, dev) {
906 hlist_for_each_entry_safe(d, n, next, &can_rx_dev_list, list) { 857 if (dev->type == ARPHRD_CAN && dev->ml_priv){
907 hlist_del(&d->list); 858
908 kfree(d); 859 struct dev_rcv_lists *d = dev->ml_priv;
860
861 BUG_ON(d->entries);
862 kfree(d);
863 dev->ml_priv = NULL;
864 }
909 } 865 }
910 spin_unlock(&can_rcvlists_lock); 866 rcu_read_unlock();
911 867
912 rcu_barrier(); /* Wait for completion of call_rcu()'s */ 868 rcu_barrier(); /* Wait for completion of call_rcu()'s */
913 869
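
The af_can.c change drops the global can_rx_dev_list hash list and stores each device's dev_rcv_lists directly in dev->ml_priv, with the static can_rx_alldev_list standing in for the dev == NULL ("all devices") case, so find_dev_rcv_lists() becomes a constant-time pointer lookup instead of an RCU list walk. A userspace sketch of that shape follows; the structures are trimmed stand-ins and the vcan0 device in main() is invented.

#include <stdio.h>
#include <stdlib.h>

/* trimmed stand-ins for the kernel structures used in the patch */
struct dev_rcv_lists {
	int entries;			/* number of subscribed receivers */
	int remove_on_zero_entries;
};

struct net_device {
	char name[16];
	void *ml_priv;			/* per-device receive lists live here */
};

/* receive filters subscribed for 'all' CAN devices (dev == NULL) */
static struct dev_rcv_lists can_rx_alldev_list;

/* constant-time lookup, as in the patched find_dev_rcv_lists() */
static struct dev_rcv_lists *find_dev_rcv_lists(struct net_device *dev)
{
	if (!dev)
		return &can_rx_alldev_list;
	return dev->ml_priv;
}

int main(void)
{
	struct net_device vcan0 = { .name = "vcan0" };

	/* NETDEV_REGISTER: allocate the per-device lists once */
	vcan0.ml_priv = calloc(1, sizeof(struct dev_rcv_lists));

	printf("all-devices lists: %p\n", (void *)find_dev_rcv_lists(NULL));
	printf("vcan0 lists:       %p\n", (void *)find_dev_rcv_lists(&vcan0));

	/* NETDEV_UNREGISTER with no entries left: free and clear the pointer */
	free(vcan0.ml_priv);
	vcan0.ml_priv = NULL;
	return 0;
}

The notifier cases then reduce to allocating or freeing the structure hanging off ml_priv, which is what the NETDEV_REGISTER/NETDEV_UNREGISTER hunks above do.
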
diff --git a/net/can/af_can.h b/net/can/af_can.h
index 18f91e37cc3..34253b84e30 100644
--- a/net/can/af_can.h
+++ b/net/can/af_can.h
@@ -63,10 +63,8 @@ struct receiver {
63 63
64enum { RX_ERR, RX_ALL, RX_FIL, RX_INV, RX_EFF, RX_MAX }; 64enum { RX_ERR, RX_ALL, RX_FIL, RX_INV, RX_EFF, RX_MAX };
65 65
66/* per device receive filters linked at dev->ml_priv */
66struct dev_rcv_lists { 67struct dev_rcv_lists {
67 struct hlist_node list;
68 struct rcu_head rcu;
69 struct net_device *dev;
70 struct hlist_head rx[RX_MAX]; 68 struct hlist_head rx[RX_MAX];
71 struct hlist_head rx_sff[0x800]; 69 struct hlist_head rx_sff[0x800];
72 int remove_on_zero_entries; 70 int remove_on_zero_entries;
diff --git a/net/can/proc.c b/net/can/proc.c
index 9b9ad29be56..f4265cc9c3f 100644
--- a/net/can/proc.c
+++ b/net/can/proc.c
@@ -45,6 +45,7 @@
45#include <linux/proc_fs.h> 45#include <linux/proc_fs.h>
46#include <linux/list.h> 46#include <linux/list.h>
47#include <linux/rcupdate.h> 47#include <linux/rcupdate.h>
48#include <linux/if_arp.h>
48#include <linux/can/core.h> 49#include <linux/can/core.h>
49 50
50#include "af_can.h" 51#include "af_can.h"
@@ -84,6 +85,9 @@ static const char rx_list_name[][8] = {
84 [RX_EFF] = "rx_eff", 85 [RX_EFF] = "rx_eff",
85}; 86};
86 87
88/* receive filters subscribed for 'all' CAN devices */
89extern struct dev_rcv_lists can_rx_alldev_list;
90
87/* 91/*
88 * af_can statistics stuff 92 * af_can statistics stuff
89 */ 93 */
@@ -190,10 +194,6 @@ void can_stat_update(unsigned long data)
190 194
191/* 195/*
192 * proc read functions 196 * proc read functions
193 *
194 * From known use-cases we expect about 10 entries in a receive list to be
195 * printed in the proc_fs. So PAGE_SIZE is definitely enough space here.
196 *
197 */ 197 */
198 198
199static void can_print_rcvlist(struct seq_file *m, struct hlist_head *rx_list, 199static void can_print_rcvlist(struct seq_file *m, struct hlist_head *rx_list,
@@ -202,7 +202,6 @@ static void can_print_rcvlist(struct seq_file *m, struct hlist_head *rx_list,
202 struct receiver *r; 202 struct receiver *r;
203 struct hlist_node *n; 203 struct hlist_node *n;
204 204
205 rcu_read_lock();
206 hlist_for_each_entry_rcu(r, n, rx_list, list) { 205 hlist_for_each_entry_rcu(r, n, rx_list, list) {
207 char *fmt = (r->can_id & CAN_EFF_FLAG)? 206 char *fmt = (r->can_id & CAN_EFF_FLAG)?
208 " %-5s %08X %08x %08x %08x %8ld %s\n" : 207 " %-5s %08X %08x %08x %08x %8ld %s\n" :
@@ -212,7 +211,6 @@ static void can_print_rcvlist(struct seq_file *m, struct hlist_head *rx_list,
212 (unsigned long)r->func, (unsigned long)r->data, 211 (unsigned long)r->func, (unsigned long)r->data,
213 r->matches, r->ident); 212 r->matches, r->ident);
214 } 213 }
215 rcu_read_unlock();
216} 214}
217 215
218static void can_print_recv_banner(struct seq_file *m) 216static void can_print_recv_banner(struct seq_file *m)
@@ -346,24 +344,39 @@ static const struct file_operations can_version_proc_fops = {
346 .release = single_release, 344 .release = single_release,
347}; 345};
348 346
347static inline void can_rcvlist_proc_show_one(struct seq_file *m, int idx,
348 struct net_device *dev,
349 struct dev_rcv_lists *d)
350{
351 if (!hlist_empty(&d->rx[idx])) {
352 can_print_recv_banner(m);
353 can_print_rcvlist(m, &d->rx[idx], dev);
354 } else
355 seq_printf(m, " (%s: no entry)\n", DNAME(dev));
356
357}
358
349static int can_rcvlist_proc_show(struct seq_file *m, void *v) 359static int can_rcvlist_proc_show(struct seq_file *m, void *v)
350{ 360{
351 /* double cast to prevent GCC warning */ 361 /* double cast to prevent GCC warning */
352 int idx = (int)(long)m->private; 362 int idx = (int)(long)m->private;
363 struct net_device *dev;
353 struct dev_rcv_lists *d; 364 struct dev_rcv_lists *d;
354 struct hlist_node *n;
355 365
356 seq_printf(m, "\nreceive list '%s':\n", rx_list_name[idx]); 366 seq_printf(m, "\nreceive list '%s':\n", rx_list_name[idx]);
357 367
358 rcu_read_lock(); 368 rcu_read_lock();
359 hlist_for_each_entry_rcu(d, n, &can_rx_dev_list, list) {
360 369
361 if (!hlist_empty(&d->rx[idx])) { 370 /* receive list for 'all' CAN devices (dev == NULL) */
362 can_print_recv_banner(m); 371 d = &can_rx_alldev_list;
363 can_print_rcvlist(m, &d->rx[idx], d->dev); 372 can_rcvlist_proc_show_one(m, idx, NULL, d);
364 } else 373
365 seq_printf(m, " (%s: no entry)\n", DNAME(d->dev)); 374 /* receive list for registered CAN devices */
375 for_each_netdev_rcu(&init_net, dev) {
376 if (dev->type == ARPHRD_CAN && dev->ml_priv)
377 can_rcvlist_proc_show_one(m, idx, dev, dev->ml_priv);
366 } 378 }
379
367 rcu_read_unlock(); 380 rcu_read_unlock();
368 381
369 seq_putc(m, '\n'); 382 seq_putc(m, '\n');
@@ -383,34 +396,50 @@ static const struct file_operations can_rcvlist_proc_fops = {
383 .release = single_release, 396 .release = single_release,
384}; 397};
385 398
399static inline void can_rcvlist_sff_proc_show_one(struct seq_file *m,
400 struct net_device *dev,
401 struct dev_rcv_lists *d)
402{
403 int i;
404 int all_empty = 1;
405
406 /* check wether at least one list is non-empty */
407 for (i = 0; i < 0x800; i++)
408 if (!hlist_empty(&d->rx_sff[i])) {
409 all_empty = 0;
410 break;
411 }
412
413 if (!all_empty) {
414 can_print_recv_banner(m);
415 for (i = 0; i < 0x800; i++) {
416 if (!hlist_empty(&d->rx_sff[i]))
417 can_print_rcvlist(m, &d->rx_sff[i], dev);
418 }
419 } else
420 seq_printf(m, " (%s: no entry)\n", DNAME(dev));
421}
422
386static int can_rcvlist_sff_proc_show(struct seq_file *m, void *v) 423static int can_rcvlist_sff_proc_show(struct seq_file *m, void *v)
387{ 424{
425 struct net_device *dev;
388 struct dev_rcv_lists *d; 426 struct dev_rcv_lists *d;
389 struct hlist_node *n;
390 427
391 /* RX_SFF */ 428 /* RX_SFF */
392 seq_puts(m, "\nreceive list 'rx_sff':\n"); 429 seq_puts(m, "\nreceive list 'rx_sff':\n");
393 430
394 rcu_read_lock(); 431 rcu_read_lock();
395 hlist_for_each_entry_rcu(d, n, &can_rx_dev_list, list) { 432
396 int i, all_empty = 1; 433 /* sff receive list for 'all' CAN devices (dev == NULL) */
397 /* check wether at least one list is non-empty */ 434 d = &can_rx_alldev_list;
398 for (i = 0; i < 0x800; i++) 435 can_rcvlist_sff_proc_show_one(m, NULL, d);
399 if (!hlist_empty(&d->rx_sff[i])) { 436
400 all_empty = 0; 437 /* sff receive list for registered CAN devices */
401 break; 438 for_each_netdev_rcu(&init_net, dev) {
402 } 439 if (dev->type == ARPHRD_CAN && dev->ml_priv)
403 440 can_rcvlist_sff_proc_show_one(m, dev, dev->ml_priv);
404 if (!all_empty) {
405 can_print_recv_banner(m);
406 for (i = 0; i < 0x800; i++) {
407 if (!hlist_empty(&d->rx_sff[i]))
408 can_print_rcvlist(m, &d->rx_sff[i],
409 d->dev);
410 }
411 } else
412 seq_printf(m, " (%s: no entry)\n", DNAME(d->dev));
413 } 441 }
442
414 rcu_read_unlock(); 443 rcu_read_unlock();
415 444
416 seq_putc(m, '\n'); 445 seq_putc(m, '\n');
diff --git a/net/core/dev.c b/net/core/dev.c
index be9924f60ec..2cba5c521e5 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1853,6 +1853,14 @@ gso:
1853 1853
1854 skb->next = nskb->next; 1854 skb->next = nskb->next;
1855 nskb->next = NULL; 1855 nskb->next = NULL;
1856
1857 /*
1858 * If device doesnt need nskb->dst, release it right now while
1859 * its hot in this cpu cache
1860 */
1861 if (dev->priv_flags & IFF_XMIT_DST_RELEASE)
1862 skb_dst_drop(nskb);
1863
1856 rc = ops->ndo_start_xmit(nskb, dev); 1864 rc = ops->ndo_start_xmit(nskb, dev);
1857 if (unlikely(rc != NETDEV_TX_OK)) { 1865 if (unlikely(rc != NETDEV_TX_OK)) {
1858 if (rc & ~NETDEV_TX_MASK) 1866 if (rc & ~NETDEV_TX_MASK)
@@ -1974,6 +1982,21 @@ static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
1974 return rc; 1982 return rc;
1975} 1983}
1976 1984
1985/*
1986 * Returns true if either:
1987 * 1. skb has frag_list and the device doesn't support FRAGLIST, or
1988 * 2. skb is fragmented and the device does not support SG, or if
1989 * at least one of fragments is in highmem and device does not
1990 * support DMA from it.
1991 */
1992static inline int skb_needs_linearize(struct sk_buff *skb,
1993 struct net_device *dev)
1994{
1995 return (skb_has_frags(skb) && !(dev->features & NETIF_F_FRAGLIST)) ||
1996 (skb_shinfo(skb)->nr_frags && (!(dev->features & NETIF_F_SG) ||
1997 illegal_highdma(dev, skb)));
1998}
1999
1977/** 2000/**
1978 * dev_queue_xmit - transmit a buffer 2001 * dev_queue_xmit - transmit a buffer
1979 * @skb: buffer to transmit 2002 * @skb: buffer to transmit
@@ -2010,18 +2033,8 @@ int dev_queue_xmit(struct sk_buff *skb)
2010 if (netif_needs_gso(dev, skb)) 2033 if (netif_needs_gso(dev, skb))
2011 goto gso; 2034 goto gso;
2012 2035
2013 if (skb_has_frags(skb) && 2036 /* Convert a paged skb to linear, if required */
2014 !(dev->features & NETIF_F_FRAGLIST) && 2037 if (skb_needs_linearize(skb, dev) && __skb_linearize(skb))
2015 __skb_linearize(skb))
2016 goto out_kfree_skb;
2017
2018 /* Fragmented skb is linearized if device does not support SG,
2019 * or if at least one of fragments is in highmem and device
2020 * does not support DMA from it.
2021 */
2022 if (skb_shinfo(skb)->nr_frags &&
2023 (!(dev->features & NETIF_F_SG) || illegal_highdma(dev, skb)) &&
2024 __skb_linearize(skb))
2025 goto out_kfree_skb; 2038 goto out_kfree_skb;
2026 2039
2027 /* If packet is not checksummed and device does not support 2040 /* If packet is not checksummed and device does not support
@@ -2422,6 +2435,7 @@ int netif_receive_skb(struct sk_buff *skb)
2422 struct packet_type *ptype, *pt_prev; 2435 struct packet_type *ptype, *pt_prev;
2423 struct net_device *orig_dev; 2436 struct net_device *orig_dev;
2424 struct net_device *null_or_orig; 2437 struct net_device *null_or_orig;
2438 struct net_device *null_or_bond;
2425 int ret = NET_RX_DROP; 2439 int ret = NET_RX_DROP;
2426 __be16 type; 2440 __be16 type;
2427 2441
@@ -2487,12 +2501,24 @@ ncls:
2487 if (!skb) 2501 if (!skb)
2488 goto out; 2502 goto out;
2489 2503
2504 /*
2505 * Make sure frames received on VLAN interfaces stacked on
2506 * bonding interfaces still make their way to any base bonding
2507 * device that may have registered for a specific ptype. The
2508 * handler may have to adjust skb->dev and orig_dev.
2509 */
2510 null_or_bond = NULL;
2511 if ((skb->dev->priv_flags & IFF_802_1Q_VLAN) &&
2512 (vlan_dev_real_dev(skb->dev)->priv_flags & IFF_BONDING)) {
2513 null_or_bond = vlan_dev_real_dev(skb->dev);
2514 }
2515
2490 type = skb->protocol; 2516 type = skb->protocol;
2491 list_for_each_entry_rcu(ptype, 2517 list_for_each_entry_rcu(ptype,
2492 &ptype_base[ntohs(type) & PTYPE_HASH_MASK], list) { 2518 &ptype_base[ntohs(type) & PTYPE_HASH_MASK], list) {
2493 if (ptype->type == type && 2519 if (ptype->type == type && (ptype->dev == null_or_orig ||
2494 (ptype->dev == null_or_orig || ptype->dev == skb->dev || 2520 ptype->dev == skb->dev || ptype->dev == orig_dev ||
2495 ptype->dev == orig_dev)) { 2521 ptype->dev == null_or_bond)) {
2496 if (pt_prev) 2522 if (pt_prev)
2497 ret = deliver_skb(skb, pt_prev, orig_dev); 2523 ret = deliver_skb(skb, pt_prev, orig_dev);
2498 pt_prev = ptype; 2524 pt_prev = ptype;
@@ -2561,7 +2587,7 @@ out:
2561 return netif_receive_skb(skb); 2587 return netif_receive_skb(skb);
2562} 2588}
2563 2589
2564void napi_gro_flush(struct napi_struct *napi) 2590static void napi_gro_flush(struct napi_struct *napi)
2565{ 2591{
2566 struct sk_buff *skb, *next; 2592 struct sk_buff *skb, *next;
2567 2593
@@ -2574,7 +2600,6 @@ void napi_gro_flush(struct napi_struct *napi)
2574 napi->gro_count = 0; 2600 napi->gro_count = 0;
2575 napi->gro_list = NULL; 2601 napi->gro_list = NULL;
2576} 2602}
2577EXPORT_SYMBOL(napi_gro_flush);
2578 2603
2579enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb) 2604enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
2580{ 2605{
@@ -3185,7 +3210,7 @@ static void dev_seq_printf_stats(struct seq_file *seq, struct net_device *dev)
3185{ 3210{
3186 const struct net_device_stats *stats = dev_get_stats(dev); 3211 const struct net_device_stats *stats = dev_get_stats(dev);
3187 3212
3188 seq_printf(seq, "%6s:%8lu %7lu %4lu %4lu %4lu %5lu %10lu %9lu " 3213 seq_printf(seq, "%6s: %7lu %7lu %4lu %4lu %4lu %5lu %10lu %9lu "
3189 "%8lu %7lu %4lu %4lu %4lu %5lu %7lu %10lu\n", 3214 "%8lu %7lu %4lu %4lu %4lu %5lu %7lu %10lu\n",
3190 dev->name, stats->rx_bytes, stats->rx_packets, 3215 dev->name, stats->rx_bytes, stats->rx_packets,
3191 stats->rx_errors, 3216 stats->rx_errors,
@@ -3640,10 +3665,10 @@ void __dev_set_rx_mode(struct net_device *dev)
3640 /* Unicast addresses changes may only happen under the rtnl, 3665 /* Unicast addresses changes may only happen under the rtnl,
3641 * therefore calling __dev_set_promiscuity here is safe. 3666 * therefore calling __dev_set_promiscuity here is safe.
3642 */ 3667 */
3643 if (dev->uc.count > 0 && !dev->uc_promisc) { 3668 if (!netdev_uc_empty(dev) && !dev->uc_promisc) {
3644 __dev_set_promiscuity(dev, 1); 3669 __dev_set_promiscuity(dev, 1);
3645 dev->uc_promisc = 1; 3670 dev->uc_promisc = 1;
3646 } else if (dev->uc.count == 0 && dev->uc_promisc) { 3671 } else if (netdev_uc_empty(dev) && dev->uc_promisc) {
3647 __dev_set_promiscuity(dev, -1); 3672 __dev_set_promiscuity(dev, -1);
3648 dev->uc_promisc = 0; 3673 dev->uc_promisc = 0;
3649 } 3674 }
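
The dev_queue_xmit() hunks fold two separate linearization tests into the new skb_needs_linearize() helper. The decision is pure flag-testing and can be modelled on its own; the feature bit values and the cases exercised in main() below are assumptions for the demo, not values from the patch, and illegal_highdma() is reduced to a single boolean.

#include <stdbool.h>
#include <stdio.h>

/* illustrative feature bits; the real NETIF_F_* values live in netdevice.h */
#define F_SG		(1u << 0)	/* device can do scatter/gather DMA */
#define F_FRAGLIST	(1u << 1)	/* device accepts skbs with frag lists */

struct fake_skb {
	bool has_frag_list;	/* chained fragments (skb_has_frags()) */
	int nr_frags;		/* paged fragments (skb_shinfo()->nr_frags) */
	bool frag_in_highmem;	/* a fragment the device cannot DMA from */
};

/*
 * Mirror of the logic in the new skb_needs_linearize():
 * linearize when the device cannot handle the skb's current layout.
 */
static bool skb_needs_linearize(const struct fake_skb *skb, unsigned int features)
{
	return (skb->has_frag_list && !(features & F_FRAGLIST)) ||
	       (skb->nr_frags && (!(features & F_SG) || skb->frag_in_highmem));
}

int main(void)
{
	struct fake_skb paged = { .nr_frags = 3 };

	printf("SG-capable device: %d\n",
	       skb_needs_linearize(&paged, F_SG | F_FRAGLIST));	/* 0 */
	printf("no-SG device:      %d\n",
	       skb_needs_linearize(&paged, F_FRAGLIST));	/* 1: linearize */
	return 0;
}
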
diff --git a/net/core/fib_rules.c b/net/core/fib_rules.c
index 02a3b2c69c1..9a24377146b 100644
--- a/net/core/fib_rules.c
+++ b/net/core/fib_rules.c
@@ -708,7 +708,7 @@ static struct notifier_block fib_rules_notifier = {
708 .notifier_call = fib_rules_event, 708 .notifier_call = fib_rules_event,
709}; 709};
710 710
711static int fib_rules_net_init(struct net *net) 711static int __net_init fib_rules_net_init(struct net *net)
712{ 712{
713 INIT_LIST_HEAD(&net->rules_ops); 713 INIT_LIST_HEAD(&net->rules_ops);
714 spin_lock_init(&net->rules_mod_lock); 714 spin_lock_init(&net->rules_mod_lock);
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index f35377b643e..f2efd72da79 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -2417,8 +2417,7 @@ EXPORT_SYMBOL(neigh_seq_stop);
2417 2417
2418static void *neigh_stat_seq_start(struct seq_file *seq, loff_t *pos) 2418static void *neigh_stat_seq_start(struct seq_file *seq, loff_t *pos)
2419{ 2419{
2420 struct proc_dir_entry *pde = seq->private; 2420 struct neigh_table *tbl = seq->private;
2421 struct neigh_table *tbl = pde->data;
2422 int cpu; 2421 int cpu;
2423 2422
2424 if (*pos == 0) 2423 if (*pos == 0)
@@ -2435,8 +2434,7 @@ static void *neigh_stat_seq_start(struct seq_file *seq, loff_t *pos)
2435 2434
2436static void *neigh_stat_seq_next(struct seq_file *seq, void *v, loff_t *pos) 2435static void *neigh_stat_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2437{ 2436{
2438 struct proc_dir_entry *pde = seq->private; 2437 struct neigh_table *tbl = seq->private;
2439 struct neigh_table *tbl = pde->data;
2440 int cpu; 2438 int cpu;
2441 2439
2442 for (cpu = *pos; cpu < nr_cpu_ids; ++cpu) { 2440 for (cpu = *pos; cpu < nr_cpu_ids; ++cpu) {
@@ -2455,8 +2453,7 @@ static void neigh_stat_seq_stop(struct seq_file *seq, void *v)
2455 2453
2456static int neigh_stat_seq_show(struct seq_file *seq, void *v) 2454static int neigh_stat_seq_show(struct seq_file *seq, void *v)
2457{ 2455{
2458 struct proc_dir_entry *pde = seq->private; 2456 struct neigh_table *tbl = seq->private;
2459 struct neigh_table *tbl = pde->data;
2460 struct neigh_statistics *st = v; 2457 struct neigh_statistics *st = v;
2461 2458
2462 if (v == SEQ_START_TOKEN) { 2459 if (v == SEQ_START_TOKEN) {
@@ -2501,7 +2498,7 @@ static int neigh_stat_seq_open(struct inode *inode, struct file *file)
2501 2498
2502 if (!ret) { 2499 if (!ret) {
2503 struct seq_file *sf = file->private_data; 2500 struct seq_file *sf = file->private_data;
2504 sf->private = PDE(inode); 2501 sf->private = PDE(inode)->data;
2505 } 2502 }
2506 return ret; 2503 return ret;
2507}; 2504};
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index 0b4d0d35ef4..7aa69725376 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -407,11 +407,24 @@ static void arp_reply(struct sk_buff *skb)
407 __be32 sip, tip; 407 __be32 sip, tip;
408 unsigned char *sha; 408 unsigned char *sha;
409 struct sk_buff *send_skb; 409 struct sk_buff *send_skb;
410 struct netpoll *np = NULL; 410 struct netpoll *np, *tmp;
411 unsigned long flags;
412 int hits = 0;
413
414 if (list_empty(&npinfo->rx_np))
415 return;
416
417 /* Before checking the packet, we do some early
418 inspection whether this is interesting at all */
419 spin_lock_irqsave(&npinfo->rx_lock, flags);
420 list_for_each_entry_safe(np, tmp, &npinfo->rx_np, rx) {
421 if (np->dev == skb->dev)
422 hits++;
423 }
424 spin_unlock_irqrestore(&npinfo->rx_lock, flags);
411 425
412 if (npinfo->rx_np && npinfo->rx_np->dev == skb->dev) 426 /* No netpoll struct is using this dev */
413 np = npinfo->rx_np; 427 if (!hits)
414 if (!np)
415 return; 428 return;
416 429
417 /* No arp on this interface */ 430 /* No arp on this interface */
@@ -437,77 +450,91 @@ static void arp_reply(struct sk_buff *skb)
437 arp_ptr += skb->dev->addr_len; 450 arp_ptr += skb->dev->addr_len;
438 memcpy(&sip, arp_ptr, 4); 451 memcpy(&sip, arp_ptr, 4);
439 arp_ptr += 4; 452 arp_ptr += 4;
440 /* if we actually cared about dst hw addr, it would get copied here */ 453 /* If we actually cared about dst hw addr,
454 it would get copied here */
441 arp_ptr += skb->dev->addr_len; 455 arp_ptr += skb->dev->addr_len;
442 memcpy(&tip, arp_ptr, 4); 456 memcpy(&tip, arp_ptr, 4);
443 457
444 /* Should we ignore arp? */ 458 /* Should we ignore arp? */
445 if (tip != np->local_ip || 459 if (ipv4_is_loopback(tip) || ipv4_is_multicast(tip))
446 ipv4_is_loopback(tip) || ipv4_is_multicast(tip))
447 return; 460 return;
448 461
449 size = arp_hdr_len(skb->dev); 462 size = arp_hdr_len(skb->dev);
450 send_skb = find_skb(np, size + LL_ALLOCATED_SPACE(np->dev),
451 LL_RESERVED_SPACE(np->dev));
452 463
453 if (!send_skb) 464 spin_lock_irqsave(&npinfo->rx_lock, flags);
454 return; 465 list_for_each_entry_safe(np, tmp, &npinfo->rx_np, rx) {
455 466 if (tip != np->local_ip)
456 skb_reset_network_header(send_skb); 467 continue;
457 arp = (struct arphdr *) skb_put(send_skb, size);
458 send_skb->dev = skb->dev;
459 send_skb->protocol = htons(ETH_P_ARP);
460 468
461 /* Fill the device header for the ARP frame */ 469 send_skb = find_skb(np, size + LL_ALLOCATED_SPACE(np->dev),
462 if (dev_hard_header(send_skb, skb->dev, ptype, 470 LL_RESERVED_SPACE(np->dev));
463 sha, np->dev->dev_addr, 471 if (!send_skb)
464 send_skb->len) < 0) { 472 continue;
465 kfree_skb(send_skb);
466 return;
467 }
468 473
469 /* 474 skb_reset_network_header(send_skb);
470 * Fill out the arp protocol part. 475 arp = (struct arphdr *) skb_put(send_skb, size);
471 * 476 send_skb->dev = skb->dev;
472 * we only support ethernet device type, 477 send_skb->protocol = htons(ETH_P_ARP);
473 * which (according to RFC 1390) should always equal 1 (Ethernet).
474 */
475 478
476 arp->ar_hrd = htons(np->dev->type); 479 /* Fill the device header for the ARP frame */
477 arp->ar_pro = htons(ETH_P_IP); 480 if (dev_hard_header(send_skb, skb->dev, ptype,
478 arp->ar_hln = np->dev->addr_len; 481 sha, np->dev->dev_addr,
479 arp->ar_pln = 4; 482 send_skb->len) < 0) {
480 arp->ar_op = htons(type); 483 kfree_skb(send_skb);
484 continue;
485 }
481 486
482 arp_ptr=(unsigned char *)(arp + 1); 487 /*
483 memcpy(arp_ptr, np->dev->dev_addr, np->dev->addr_len); 488 * Fill out the arp protocol part.
484 arp_ptr += np->dev->addr_len; 489 *
485 memcpy(arp_ptr, &tip, 4); 490 * we only support ethernet device type,
486 arp_ptr += 4; 491 * which (according to RFC 1390) should
487 memcpy(arp_ptr, sha, np->dev->addr_len); 492 * always equal 1 (Ethernet).
488 arp_ptr += np->dev->addr_len; 493 */
489 memcpy(arp_ptr, &sip, 4);
490 494
491 netpoll_send_skb(np, send_skb); 495 arp->ar_hrd = htons(np->dev->type);
496 arp->ar_pro = htons(ETH_P_IP);
497 arp->ar_hln = np->dev->addr_len;
498 arp->ar_pln = 4;
499 arp->ar_op = htons(type);
500
501 arp_ptr = (unsigned char *)(arp + 1);
502 memcpy(arp_ptr, np->dev->dev_addr, np->dev->addr_len);
503 arp_ptr += np->dev->addr_len;
504 memcpy(arp_ptr, &tip, 4);
505 arp_ptr += 4;
506 memcpy(arp_ptr, sha, np->dev->addr_len);
507 arp_ptr += np->dev->addr_len;
508 memcpy(arp_ptr, &sip, 4);
509
510 netpoll_send_skb(np, send_skb);
511
512 /* If there are several rx_hooks for the same address,
513 we're fine by sending a single reply */
514 break;
515 }
516 spin_unlock_irqrestore(&npinfo->rx_lock, flags);
492} 517}
493 518
494int __netpoll_rx(struct sk_buff *skb) 519int __netpoll_rx(struct sk_buff *skb)
495{ 520{
496 int proto, len, ulen; 521 int proto, len, ulen;
522 int hits = 0;
497 struct iphdr *iph; 523 struct iphdr *iph;
498 struct udphdr *uh; 524 struct udphdr *uh;
499 struct netpoll_info *npi = skb->dev->npinfo; 525 struct netpoll_info *npinfo = skb->dev->npinfo;
500 struct netpoll *np = npi->rx_np; 526 struct netpoll *np, *tmp;
501 527
502 if (!np) 528 if (list_empty(&npinfo->rx_np))
503 goto out; 529 goto out;
530
504 if (skb->dev->type != ARPHRD_ETHER) 531 if (skb->dev->type != ARPHRD_ETHER)
505 goto out; 532 goto out;
506 533
507 /* check if netpoll clients need ARP */ 534 /* check if netpoll clients need ARP */
508 if (skb->protocol == htons(ETH_P_ARP) && 535 if (skb->protocol == htons(ETH_P_ARP) &&
509 atomic_read(&trapped)) { 536 atomic_read(&trapped)) {
510 skb_queue_tail(&npi->arp_tx, skb); 537 skb_queue_tail(&npinfo->arp_tx, skb);
511 return 1; 538 return 1;
512 } 539 }
513 540
@@ -551,16 +578,23 @@ int __netpoll_rx(struct sk_buff *skb)
551 goto out; 578 goto out;
552 if (checksum_udp(skb, uh, ulen, iph->saddr, iph->daddr)) 579 if (checksum_udp(skb, uh, ulen, iph->saddr, iph->daddr))
553 goto out; 580 goto out;
554 if (np->local_ip && np->local_ip != iph->daddr)
555 goto out;
556 if (np->remote_ip && np->remote_ip != iph->saddr)
557 goto out;
558 if (np->local_port && np->local_port != ntohs(uh->dest))
559 goto out;
560 581
561 np->rx_hook(np, ntohs(uh->source), 582 list_for_each_entry_safe(np, tmp, &npinfo->rx_np, rx) {
562 (char *)(uh+1), 583 if (np->local_ip && np->local_ip != iph->daddr)
563 ulen - sizeof(struct udphdr)); 584 continue;
585 if (np->remote_ip && np->remote_ip != iph->saddr)
586 continue;
587 if (np->local_port && np->local_port != ntohs(uh->dest))
588 continue;
589
590 np->rx_hook(np, ntohs(uh->source),
591 (char *)(uh+1),
592 ulen - sizeof(struct udphdr));
593 hits++;
594 }
595
596 if (!hits)
597 goto out;
564 598
565 kfree_skb(skb); 599 kfree_skb(skb);
566 return 1; 600 return 1;
@@ -684,6 +718,7 @@ int netpoll_setup(struct netpoll *np)
684 struct net_device *ndev = NULL; 718 struct net_device *ndev = NULL;
685 struct in_device *in_dev; 719 struct in_device *in_dev;
686 struct netpoll_info *npinfo; 720 struct netpoll_info *npinfo;
721 struct netpoll *npe, *tmp;
687 unsigned long flags; 722 unsigned long flags;
688 int err; 723 int err;
689 724
@@ -704,7 +739,7 @@ int netpoll_setup(struct netpoll *np)
704 } 739 }
705 740
706 npinfo->rx_flags = 0; 741 npinfo->rx_flags = 0;
707 npinfo->rx_np = NULL; 742 INIT_LIST_HEAD(&npinfo->rx_np);
708 743
709 spin_lock_init(&npinfo->rx_lock); 744 spin_lock_init(&npinfo->rx_lock);
710 skb_queue_head_init(&npinfo->arp_tx); 745 skb_queue_head_init(&npinfo->arp_tx);
@@ -785,7 +820,7 @@ int netpoll_setup(struct netpoll *np)
785 if (np->rx_hook) { 820 if (np->rx_hook) {
786 spin_lock_irqsave(&npinfo->rx_lock, flags); 821 spin_lock_irqsave(&npinfo->rx_lock, flags);
787 npinfo->rx_flags |= NETPOLL_RX_ENABLED; 822 npinfo->rx_flags |= NETPOLL_RX_ENABLED;
788 npinfo->rx_np = np; 823 list_add_tail(&np->rx, &npinfo->rx_np);
789 spin_unlock_irqrestore(&npinfo->rx_lock, flags); 824 spin_unlock_irqrestore(&npinfo->rx_lock, flags);
790 } 825 }
791 826
@@ -801,9 +836,16 @@ int netpoll_setup(struct netpoll *np)
801 return 0; 836 return 0;
802 837
803 release: 838 release:
804 if (!ndev->npinfo) 839 if (!ndev->npinfo) {
840 spin_lock_irqsave(&npinfo->rx_lock, flags);
841 list_for_each_entry_safe(npe, tmp, &npinfo->rx_np, rx) {
842 npe->dev = NULL;
843 }
844 spin_unlock_irqrestore(&npinfo->rx_lock, flags);
845
805 kfree(npinfo); 846 kfree(npinfo);
806 np->dev = NULL; 847 }
848
807 dev_put(ndev); 849 dev_put(ndev);
808 return err; 850 return err;
809} 851}
@@ -823,10 +865,11 @@ void netpoll_cleanup(struct netpoll *np)
823 if (np->dev) { 865 if (np->dev) {
824 npinfo = np->dev->npinfo; 866 npinfo = np->dev->npinfo;
825 if (npinfo) { 867 if (npinfo) {
826 if (npinfo->rx_np == np) { 868 if (!list_empty(&npinfo->rx_np)) {
827 spin_lock_irqsave(&npinfo->rx_lock, flags); 869 spin_lock_irqsave(&npinfo->rx_lock, flags);
828 npinfo->rx_np = NULL; 870 list_del(&np->rx);
829 npinfo->rx_flags &= ~NETPOLL_RX_ENABLED; 871 if (list_empty(&npinfo->rx_np))
872 npinfo->rx_flags &= ~NETPOLL_RX_ENABLED;
830 spin_unlock_irqrestore(&npinfo->rx_lock, flags); 873 spin_unlock_irqrestore(&npinfo->rx_lock, flags);
831 } 874 }
832 875
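
netpoll.c switches from a single npinfo->rx_np pointer to a list of netpoll clients per device, so arp_reply() and __netpoll_rx() now walk the list and count hits. The per-client matching that decides whose rx_hook runs is plain filtering; the sketch below models it in userspace with trimmed fields and hard-coded demo addresses (a zero field means "match anything", as in the kernel code).

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>

/* trimmed stand-in for struct netpoll: 0 in a field means "don't care" */
struct np {
	uint32_t local_ip;	/* network byte order, like np->local_ip */
	uint32_t remote_ip;
	uint16_t local_port;	/* host byte order, compared after ntohs() */
};

/* walk the per-device client list and count who wants this datagram,
 * mirroring the new loop in __netpoll_rx() */
static int netpoll_rx_hits(const struct np *list, int n,
			   uint32_t daddr, uint32_t saddr, uint16_t dport)
{
	int i, hits = 0;

	for (i = 0; i < n; i++) {
		if (list[i].local_ip && list[i].local_ip != daddr)
			continue;
		if (list[i].remote_ip && list[i].remote_ip != saddr)
			continue;
		if (list[i].local_port && list[i].local_port != dport)
			continue;
		hits++;		/* this client's rx_hook would run */
	}
	return hits;
}

int main(void)
{
	struct np clients[] = {
		{ inet_addr("192.0.2.1"), 0, 6666 },
		{ inet_addr("192.0.2.1"), 0, 7777 },
	};

	printf("hits = %d\n",
	       netpoll_rx_hits(clients, 2, inet_addr("192.0.2.1"),
			       inet_addr("198.51.100.9"), 6666));	/* 1 */
	return 0;
}

As in the patch, the packet is only consumed (kfree_skb() and return 1) when at least one client matched; otherwise it falls through to the normal receive path.
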
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index 794bcb897ff..62f3878a601 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -1386,7 +1386,7 @@ static struct notifier_block rtnetlink_dev_notifier = {
1386}; 1386};
1387 1387
1388 1388
1389static int rtnetlink_net_init(struct net *net) 1389static int __net_init rtnetlink_net_init(struct net *net)
1390{ 1390{
1391 struct sock *sk; 1391 struct sock *sk;
1392 sk = netlink_kernel_create(net, NETLINK_ROUTE, RTNLGRP_MAX, 1392 sk = netlink_kernel_create(net, NETLINK_ROUTE, RTNLGRP_MAX,
@@ -1397,7 +1397,7 @@ static int rtnetlink_net_init(struct net *net)
1397 return 0; 1397 return 0;
1398} 1398}
1399 1399
1400static void rtnetlink_net_exit(struct net *net) 1400static void __net_exit rtnetlink_net_exit(struct net *net)
1401{ 1401{
1402 netlink_kernel_release(net->rtnl); 1402 netlink_kernel_release(net->rtnl);
1403 net->rtnl = NULL; 1403 net->rtnl = NULL;
diff --git a/net/core/sock.c b/net/core/sock.c
index e1f6f225f01..ceef50bd131 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -741,7 +741,7 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
741 struct timeval tm; 741 struct timeval tm;
742 } v; 742 } v;
743 743
744 unsigned int lv = sizeof(int); 744 int lv = sizeof(int);
745 int len; 745 int len;
746 746
747 if (get_user(len, optlen)) 747 if (get_user(len, optlen))
@@ -2140,13 +2140,13 @@ int sock_prot_inuse_get(struct net *net, struct proto *prot)
2140} 2140}
2141EXPORT_SYMBOL_GPL(sock_prot_inuse_get); 2141EXPORT_SYMBOL_GPL(sock_prot_inuse_get);
2142 2142
2143static int sock_inuse_init_net(struct net *net) 2143static int __net_init sock_inuse_init_net(struct net *net)
2144{ 2144{
2145 net->core.inuse = alloc_percpu(struct prot_inuse); 2145 net->core.inuse = alloc_percpu(struct prot_inuse);
2146 return net->core.inuse ? 0 : -ENOMEM; 2146 return net->core.inuse ? 0 : -ENOMEM;
2147} 2147}
2148 2148
2149static void sock_inuse_exit_net(struct net *net) 2149static void __net_exit sock_inuse_exit_net(struct net *net)
2150{ 2150{
2151 free_percpu(net->core.inuse); 2151 free_percpu(net->core.inuse);
2152} 2152}
diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c
index dad7bc4878e..b195c4feaa0 100644
--- a/net/dccp/ipv4.c
+++ b/net/dccp/ipv4.c
@@ -996,7 +996,7 @@ static struct inet_protosw dccp_v4_protosw = {
996 .flags = INET_PROTOSW_ICSK, 996 .flags = INET_PROTOSW_ICSK,
997}; 997};
998 998
999static int dccp_v4_init_net(struct net *net) 999static int __net_init dccp_v4_init_net(struct net *net)
1000{ 1000{
1001 int err; 1001 int err;
1002 1002
@@ -1005,7 +1005,7 @@ static int dccp_v4_init_net(struct net *net)
1005 return err; 1005 return err;
1006} 1006}
1007 1007
1008static void dccp_v4_exit_net(struct net *net) 1008static void __net_exit dccp_v4_exit_net(struct net *net)
1009{ 1009{
1010 inet_ctl_sock_destroy(net->dccp.v4_ctl_sk); 1010 inet_ctl_sock_destroy(net->dccp.v4_ctl_sk);
1011} 1011}
diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c
index baf05cf43c2..1aec6349e85 100644
--- a/net/dccp/ipv6.c
+++ b/net/dccp/ipv6.c
@@ -1189,7 +1189,7 @@ static struct inet_protosw dccp_v6_protosw = {
1189 .flags = INET_PROTOSW_ICSK, 1189 .flags = INET_PROTOSW_ICSK,
1190}; 1190};
1191 1191
1192static int dccp_v6_init_net(struct net *net) 1192static int __net_init dccp_v6_init_net(struct net *net)
1193{ 1193{
1194 int err; 1194 int err;
1195 1195
@@ -1198,7 +1198,7 @@ static int dccp_v6_init_net(struct net *net)
1198 return err; 1198 return err;
1199} 1199}
1200 1200
1201static void dccp_v6_exit_net(struct net *net) 1201static void __net_exit dccp_v6_exit_net(struct net *net)
1202{ 1202{
1203 inet_ctl_sock_destroy(net->dccp.v6_ctl_sk); 1203 inet_ctl_sock_destroy(net->dccp.v6_ctl_sk);
1204} 1204}
diff --git a/net/ethernet/eth.c b/net/ethernet/eth.c
index dd3db88f8f0..205a1c12f3c 100644
--- a/net/ethernet/eth.c
+++ b/net/ethernet/eth.c
@@ -73,8 +73,8 @@ __setup("ether=", netdev_boot_setup);
73 * @len: packet length (<= skb->len) 73 * @len: packet length (<= skb->len)
74 * 74 *
75 * 75 *
76 * Set the protocol type. For a packet of type ETH_P_802_3 we put the length 76 * Set the protocol type. For a packet of type ETH_P_802_3/2 we put the length
77 * in here instead. It is up to the 802.2 layer to carry protocol information. 77 * in here instead.
78 */ 78 */
79int eth_header(struct sk_buff *skb, struct net_device *dev, 79int eth_header(struct sk_buff *skb, struct net_device *dev,
80 unsigned short type, 80 unsigned short type,
@@ -82,7 +82,7 @@ int eth_header(struct sk_buff *skb, struct net_device *dev,
82{ 82{
83 struct ethhdr *eth = (struct ethhdr *)skb_push(skb, ETH_HLEN); 83 struct ethhdr *eth = (struct ethhdr *)skb_push(skb, ETH_HLEN);
84 84
85 if (type != ETH_P_802_3) 85 if (type != ETH_P_802_3 && type != ETH_P_802_2)
86 eth->h_proto = htons(type); 86 eth->h_proto = htons(type);
87 else 87 else
88 eth->h_proto = htons(len); 88 eth->h_proto = htons(len);
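
eth.c now treats ETH_P_802_2 like ETH_P_802_3: for both pseudo-types, eth_header() writes the frame length into the 16-bit field after the MAC addresses instead of an EtherType. A minimal model of that branch is below; the ETH_P_* constants are reproduced from if_ether.h by hand and should be treated as illustrative.

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>

/* pseudo protocol IDs as defined in linux/if_ether.h (copied by hand) */
#define ETH_P_802_3	0x0001	/* raw 802.3 frame: field carries length */
#define ETH_P_802_2	0x0004	/* 802.2/LLC frame: field carries length */
#define ETH_P_IP	0x0800	/* Ethernet II: field carries the EtherType */

/* what eth_header() stores in eth->h_proto after the patch */
static uint16_t h_proto_field(unsigned short type, unsigned short len)
{
	if (type != ETH_P_802_3 && type != ETH_P_802_2)
		return htons(type);	/* EtherType */
	return htons(len);		/* 802.3/802.2: payload length */
}

int main(void)
{
	printf("IPv4 frame: 0x%04x\n", ntohs(h_proto_field(ETH_P_IP, 60)));
	printf("LLC frame:  0x%04x\n", ntohs(h_proto_field(ETH_P_802_2, 60)));
	return 0;
}
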
diff --git a/net/ipv4/arp.c b/net/ipv4/arp.c
index c95cd93acf2..1940b4df769 100644
--- a/net/ipv4/arp.c
+++ b/net/ipv4/arp.c
@@ -70,6 +70,7 @@
70 * bonding can change the skb before 70 * bonding can change the skb before
71 * sending (e.g. insert 8021q tag). 71 * sending (e.g. insert 8021q tag).
72 * Harald Welte : convert to make use of jenkins hash 72 * Harald Welte : convert to make use of jenkins hash
73 * Jesper D. Brouer: Proxy ARP PVLAN RFC 3069 support.
73 */ 74 */
74 75
75#include <linux/module.h> 76#include <linux/module.h>
@@ -524,12 +525,15 @@ int arp_bind_neighbour(struct dst_entry *dst)
524/* 525/*
525 * Check if we can use proxy ARP for this path 526 * Check if we can use proxy ARP for this path
526 */ 527 */
527 528static inline int arp_fwd_proxy(struct in_device *in_dev,
528static inline int arp_fwd_proxy(struct in_device *in_dev, struct rtable *rt) 529 struct net_device *dev, struct rtable *rt)
529{ 530{
530 struct in_device *out_dev; 531 struct in_device *out_dev;
531 int imi, omi = -1; 532 int imi, omi = -1;
532 533
534 if (rt->u.dst.dev == dev)
535 return 0;
536
533 if (!IN_DEV_PROXY_ARP(in_dev)) 537 if (!IN_DEV_PROXY_ARP(in_dev))
534 return 0; 538 return 0;
535 539
@@ -548,6 +552,43 @@ static inline int arp_fwd_proxy(struct in_device *in_dev, struct rtable *rt)
548} 552}
549 553
550/* 554/*
555 * Check for RFC3069 proxy arp private VLAN (allow to send back to same dev)
556 *
557 * RFC3069 supports proxy arp replies back to the same interface. This
558 * is done to support (ethernet) switch features, like RFC 3069, where
559 * the individual ports are not allowed to communicate with each
560 * other, BUT they are allowed to talk to the upstream router. As
561 * described in RFC 3069, it is possible to allow these hosts to
562 * communicate through the upstream router, by proxy_arp'ing.
563 *
564 * RFC 3069: "VLAN Aggregation for Efficient IP Address Allocation"
565 *
566 * This technology is known by different names:
567 * In RFC 3069 it is called VLAN Aggregation.
568 * Cisco and Allied Telesyn call it Private VLAN.
569 * Hewlett-Packard call it Source-Port filtering or port-isolation.
570 * Ericsson call it MAC-Forced Forwarding (RFC Draft).
571 *
572 */
573static inline int arp_fwd_pvlan(struct in_device *in_dev,
574 struct net_device *dev, struct rtable *rt,
575 __be32 sip, __be32 tip)
576{
577 /* Private VLAN is only concerned about the same ethernet segment */
578 if (rt->u.dst.dev != dev)
579 return 0;
580
581 /* Don't reply on self probes (often done by windowz boxes)*/
582 if (sip == tip)
583 return 0;
584
585 if (IN_DEV_PROXY_ARP_PVLAN(in_dev))
586 return 1;
587 else
588 return 0;
589}
590
591/*
551 * Interface to link layer: send routine and receive handler. 592 * Interface to link layer: send routine and receive handler.
552 */ 593 */
553 594
@@ -833,8 +874,11 @@ static int arp_process(struct sk_buff *skb)
833 } 874 }
834 goto out; 875 goto out;
835 } else if (IN_DEV_FORWARD(in_dev)) { 876 } else if (IN_DEV_FORWARD(in_dev)) {
836 if (addr_type == RTN_UNICAST && rt->u.dst.dev != dev && 877 if (addr_type == RTN_UNICAST &&
837 (arp_fwd_proxy(in_dev, rt) || pneigh_lookup(&arp_tbl, net, &tip, dev, 0))) { 878 (arp_fwd_proxy(in_dev, dev, rt) ||
879 arp_fwd_pvlan(in_dev, dev, rt, sip, tip) ||
880 pneigh_lookup(&arp_tbl, net, &tip, dev, 0)))
881 {
838 n = neigh_event_ns(&arp_tbl, sha, &sip, dev); 882 n = neigh_event_ns(&arp_tbl, sha, &sip, dev);
839 if (n) 883 if (n)
840 neigh_release(n); 884 neigh_release(n);
@@ -863,7 +907,8 @@ static int arp_process(struct sk_buff *skb)
863 devices (strip is candidate) 907 devices (strip is candidate)
864 */ 908 */
865 if (n == NULL && 909 if (n == NULL &&
866 arp->ar_op == htons(ARPOP_REPLY) && 910 (arp->ar_op == htons(ARPOP_REPLY) ||
911 (arp->ar_op == htons(ARPOP_REQUEST) && tip == sip)) &&
867 inet_addr_type(net, sip) == RTN_UNICAST) 912 inet_addr_type(net, sip) == RTN_UNICAST)
868 n = __neigh_lookup(&arp_tbl, &sip, dev, 1); 913 n = __neigh_lookup(&arp_tbl, &sip, dev, 1);
869 } 914 }
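
The arp.c hunks move the "would the reply go back out the ingress port?" test into the helpers: arp_fwd_proxy() now refuses when the route points back at the receiving device, while the new arp_fwd_pvlan() allows exactly that case when proxy_arp_pvlan is enabled (RFC 3069), except for self-probes where sip == tip. The combined decision, stripped of the kernel types, can be sketched as below; the field names are shorthand for the in_device flags, and the medium-ID comparison of the real arp_fwd_proxy() is omitted.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct arp_ctx {
	bool proxy_arp;		/* IN_DEV_PROXY_ARP(in_dev) */
	bool proxy_arp_pvlan;	/* IN_DEV_PROXY_ARP_PVLAN(in_dev) */
	bool same_dev;		/* route's output device == ingress device */
	uint32_t sip, tip;	/* sender / target IPv4 addresses */
};

/* classic proxy ARP: only for traffic that would leave via another port */
static bool arp_fwd_proxy(const struct arp_ctx *c)
{
	if (c->same_dev)
		return false;
	return c->proxy_arp;
}

/* RFC 3069 private-VLAN case: reply back out the same port, but never
 * to a host probing for its own address (sip == tip) */
static bool arp_fwd_pvlan(const struct arp_ctx *c)
{
	if (!c->same_dev)
		return false;
	if (c->sip == c->tip)
		return false;
	return c->proxy_arp_pvlan;
}

int main(void)
{
	struct arp_ctx hairpin = {
		.proxy_arp_pvlan = true, .same_dev = true,
		.sip = 0x0a000001, .tip = 0x0a000002,
	};

	printf("proxy=%d pvlan=%d\n",
	       arp_fwd_proxy(&hairpin), arp_fwd_pvlan(&hairpin));	/* 0 1 */
	return 0;
}

In arp_process() either helper (or a pneigh entry) is enough to answer the request, which matches the widened condition in the hunk above.
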
diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
index 040c4f05b65..cd71a390839 100644
--- a/net/ipv4/devinet.c
+++ b/net/ipv4/devinet.c
@@ -1408,6 +1408,7 @@ static struct devinet_sysctl_table {
1408 DEVINET_SYSCTL_RW_ENTRY(ARP_IGNORE, "arp_ignore"), 1408 DEVINET_SYSCTL_RW_ENTRY(ARP_IGNORE, "arp_ignore"),
1409 DEVINET_SYSCTL_RW_ENTRY(ARP_ACCEPT, "arp_accept"), 1409 DEVINET_SYSCTL_RW_ENTRY(ARP_ACCEPT, "arp_accept"),
1410 DEVINET_SYSCTL_RW_ENTRY(ARP_NOTIFY, "arp_notify"), 1410 DEVINET_SYSCTL_RW_ENTRY(ARP_NOTIFY, "arp_notify"),
1411 DEVINET_SYSCTL_RW_ENTRY(PROXY_ARP_PVLAN, "proxy_arp_pvlan"),
1411 1412
1412 DEVINET_SYSCTL_FLUSHING_ENTRY(NOXFRM, "disable_xfrm"), 1413 DEVINET_SYSCTL_FLUSHING_ENTRY(NOXFRM, "disable_xfrm"),
1413 DEVINET_SYSCTL_FLUSHING_ENTRY(NOPOLICY, "disable_policy"), 1414 DEVINET_SYSCTL_FLUSHING_ENTRY(NOPOLICY, "disable_policy"),
diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
index 82dbf711d6d..9b3e28ed524 100644
--- a/net/ipv4/fib_frontend.c
+++ b/net/ipv4/fib_frontend.c
@@ -883,7 +883,7 @@ static void nl_fib_input(struct sk_buff *skb)
883 netlink_unicast(net->ipv4.fibnl, skb, pid, MSG_DONTWAIT); 883 netlink_unicast(net->ipv4.fibnl, skb, pid, MSG_DONTWAIT);
884} 884}
885 885
886static int nl_fib_lookup_init(struct net *net) 886static int __net_init nl_fib_lookup_init(struct net *net)
887{ 887{
888 struct sock *sk; 888 struct sock *sk;
889 sk = netlink_kernel_create(net, NETLINK_FIB_LOOKUP, 0, 889 sk = netlink_kernel_create(net, NETLINK_FIB_LOOKUP, 0,
@@ -1004,7 +1004,7 @@ fail:
1004 return err; 1004 return err;
1005} 1005}
1006 1006
1007static void __net_exit ip_fib_net_exit(struct net *net) 1007static void ip_fib_net_exit(struct net *net)
1008{ 1008{
1009 unsigned int i; 1009 unsigned int i;
1010 1010
diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
index ed19aa6919c..96b21011a3e 100644
--- a/net/ipv4/fib_semantics.c
+++ b/net/ipv4/fib_semantics.c
@@ -62,8 +62,8 @@ static DEFINE_SPINLOCK(fib_multipath_lock);
62#define for_nexthops(fi) { int nhsel; const struct fib_nh * nh; \ 62#define for_nexthops(fi) { int nhsel; const struct fib_nh * nh; \
63for (nhsel=0, nh = (fi)->fib_nh; nhsel < (fi)->fib_nhs; nh++, nhsel++) 63for (nhsel=0, nh = (fi)->fib_nh; nhsel < (fi)->fib_nhs; nh++, nhsel++)
64 64
65#define change_nexthops(fi) { int nhsel; struct fib_nh * nh; \ 65#define change_nexthops(fi) { int nhsel; struct fib_nh *nexthop_nh; \
66for (nhsel=0, nh = (struct fib_nh *)((fi)->fib_nh); nhsel < (fi)->fib_nhs; nh++, nhsel++) 66for (nhsel=0, nexthop_nh = (struct fib_nh *)((fi)->fib_nh); nhsel < (fi)->fib_nhs; nexthop_nh++, nhsel++)
67 67
68#else /* CONFIG_IP_ROUTE_MULTIPATH */ 68#else /* CONFIG_IP_ROUTE_MULTIPATH */
69 69
@@ -72,7 +72,7 @@ for (nhsel=0, nh = (struct fib_nh *)((fi)->fib_nh); nhsel < (fi)->fib_nhs; nh++,
72#define for_nexthops(fi) { int nhsel = 0; const struct fib_nh * nh = (fi)->fib_nh; \ 72#define for_nexthops(fi) { int nhsel = 0; const struct fib_nh * nh = (fi)->fib_nh; \
73for (nhsel=0; nhsel < 1; nhsel++) 73for (nhsel=0; nhsel < 1; nhsel++)
74 74
75#define change_nexthops(fi) { int nhsel = 0; struct fib_nh * nh = (struct fib_nh *)((fi)->fib_nh); \ 75#define change_nexthops(fi) { int nhsel = 0; struct fib_nh *nexthop_nh = (struct fib_nh *)((fi)->fib_nh); \
76for (nhsel=0; nhsel < 1; nhsel++) 76for (nhsel=0; nhsel < 1; nhsel++)
77 77
78#endif /* CONFIG_IP_ROUTE_MULTIPATH */ 78#endif /* CONFIG_IP_ROUTE_MULTIPATH */
@@ -145,9 +145,9 @@ void free_fib_info(struct fib_info *fi)
145 return; 145 return;
146 } 146 }
147 change_nexthops(fi) { 147 change_nexthops(fi) {
148 if (nh->nh_dev) 148 if (nexthop_nh->nh_dev)
149 dev_put(nh->nh_dev); 149 dev_put(nexthop_nh->nh_dev);
150 nh->nh_dev = NULL; 150 nexthop_nh->nh_dev = NULL;
151 } endfor_nexthops(fi); 151 } endfor_nexthops(fi);
152 fib_info_cnt--; 152 fib_info_cnt--;
153 release_net(fi->fib_net); 153 release_net(fi->fib_net);
@@ -162,9 +162,9 @@ void fib_release_info(struct fib_info *fi)
162 if (fi->fib_prefsrc) 162 if (fi->fib_prefsrc)
163 hlist_del(&fi->fib_lhash); 163 hlist_del(&fi->fib_lhash);
164 change_nexthops(fi) { 164 change_nexthops(fi) {
165 if (!nh->nh_dev) 165 if (!nexthop_nh->nh_dev)
166 continue; 166 continue;
167 hlist_del(&nh->nh_hash); 167 hlist_del(&nexthop_nh->nh_hash);
168 } endfor_nexthops(fi) 168 } endfor_nexthops(fi)
169 fi->fib_dead = 1; 169 fi->fib_dead = 1;
170 fib_info_put(fi); 170 fib_info_put(fi);
@@ -395,19 +395,20 @@ static int fib_get_nhs(struct fib_info *fi, struct rtnexthop *rtnh,
395 if (!rtnh_ok(rtnh, remaining)) 395 if (!rtnh_ok(rtnh, remaining))
396 return -EINVAL; 396 return -EINVAL;
397 397
398 nh->nh_flags = (cfg->fc_flags & ~0xFF) | rtnh->rtnh_flags; 398 nexthop_nh->nh_flags =
399 nh->nh_oif = rtnh->rtnh_ifindex; 399 (cfg->fc_flags & ~0xFF) | rtnh->rtnh_flags;
400 nh->nh_weight = rtnh->rtnh_hops + 1; 400 nexthop_nh->nh_oif = rtnh->rtnh_ifindex;
401 nexthop_nh->nh_weight = rtnh->rtnh_hops + 1;
401 402
402 attrlen = rtnh_attrlen(rtnh); 403 attrlen = rtnh_attrlen(rtnh);
403 if (attrlen > 0) { 404 if (attrlen > 0) {
404 struct nlattr *nla, *attrs = rtnh_attrs(rtnh); 405 struct nlattr *nla, *attrs = rtnh_attrs(rtnh);
405 406
406 nla = nla_find(attrs, attrlen, RTA_GATEWAY); 407 nla = nla_find(attrs, attrlen, RTA_GATEWAY);
407 nh->nh_gw = nla ? nla_get_be32(nla) : 0; 408 nexthop_nh->nh_gw = nla ? nla_get_be32(nla) : 0;
408#ifdef CONFIG_NET_CLS_ROUTE 409#ifdef CONFIG_NET_CLS_ROUTE
409 nla = nla_find(attrs, attrlen, RTA_FLOW); 410 nla = nla_find(attrs, attrlen, RTA_FLOW);
410 nh->nh_tclassid = nla ? nla_get_u32(nla) : 0; 411 nexthop_nh->nh_tclassid = nla ? nla_get_u32(nla) : 0;
411#endif 412#endif
412 } 413 }
413 414
@@ -738,7 +739,7 @@ struct fib_info *fib_create_info(struct fib_config *cfg)
738 739
739 fi->fib_nhs = nhs; 740 fi->fib_nhs = nhs;
740 change_nexthops(fi) { 741 change_nexthops(fi) {
741 nh->nh_parent = fi; 742 nexthop_nh->nh_parent = fi;
742 } endfor_nexthops(fi) 743 } endfor_nexthops(fi)
743 744
744 if (cfg->fc_mx) { 745 if (cfg->fc_mx) {
@@ -808,7 +809,7 @@ struct fib_info *fib_create_info(struct fib_config *cfg)
808 goto failure; 809 goto failure;
809 } else { 810 } else {
810 change_nexthops(fi) { 811 change_nexthops(fi) {
811 if ((err = fib_check_nh(cfg, fi, nh)) != 0) 812 if ((err = fib_check_nh(cfg, fi, nexthop_nh)) != 0)
812 goto failure; 813 goto failure;
813 } endfor_nexthops(fi) 814 } endfor_nexthops(fi)
814 } 815 }
@@ -843,11 +844,11 @@ link_it:
843 struct hlist_head *head; 844 struct hlist_head *head;
844 unsigned int hash; 845 unsigned int hash;
845 846
846 if (!nh->nh_dev) 847 if (!nexthop_nh->nh_dev)
847 continue; 848 continue;
848 hash = fib_devindex_hashfn(nh->nh_dev->ifindex); 849 hash = fib_devindex_hashfn(nexthop_nh->nh_dev->ifindex);
849 head = &fib_info_devhash[hash]; 850 head = &fib_info_devhash[hash];
850 hlist_add_head(&nh->nh_hash, head); 851 hlist_add_head(&nexthop_nh->nh_hash, head);
851 } endfor_nexthops(fi) 852 } endfor_nexthops(fi)
852 spin_unlock_bh(&fib_info_lock); 853 spin_unlock_bh(&fib_info_lock);
853 return fi; 854 return fi;
@@ -1080,21 +1081,21 @@ int fib_sync_down_dev(struct net_device *dev, int force)
1080 prev_fi = fi; 1081 prev_fi = fi;
1081 dead = 0; 1082 dead = 0;
1082 change_nexthops(fi) { 1083 change_nexthops(fi) {
1083 if (nh->nh_flags&RTNH_F_DEAD) 1084 if (nexthop_nh->nh_flags&RTNH_F_DEAD)
1084 dead++; 1085 dead++;
1085 else if (nh->nh_dev == dev && 1086 else if (nexthop_nh->nh_dev == dev &&
1086 nh->nh_scope != scope) { 1087 nexthop_nh->nh_scope != scope) {
1087 nh->nh_flags |= RTNH_F_DEAD; 1088 nexthop_nh->nh_flags |= RTNH_F_DEAD;
1088#ifdef CONFIG_IP_ROUTE_MULTIPATH 1089#ifdef CONFIG_IP_ROUTE_MULTIPATH
1089 spin_lock_bh(&fib_multipath_lock); 1090 spin_lock_bh(&fib_multipath_lock);
1090 fi->fib_power -= nh->nh_power; 1091 fi->fib_power -= nexthop_nh->nh_power;
1091 nh->nh_power = 0; 1092 nexthop_nh->nh_power = 0;
1092 spin_unlock_bh(&fib_multipath_lock); 1093 spin_unlock_bh(&fib_multipath_lock);
1093#endif 1094#endif
1094 dead++; 1095 dead++;
1095 } 1096 }
1096#ifdef CONFIG_IP_ROUTE_MULTIPATH 1097#ifdef CONFIG_IP_ROUTE_MULTIPATH
1097 if (force > 1 && nh->nh_dev == dev) { 1098 if (force > 1 && nexthop_nh->nh_dev == dev) {
1098 dead = fi->fib_nhs; 1099 dead = fi->fib_nhs;
1099 break; 1100 break;
1100 } 1101 }
@@ -1144,18 +1145,20 @@ int fib_sync_up(struct net_device *dev)
1144 prev_fi = fi; 1145 prev_fi = fi;
1145 alive = 0; 1146 alive = 0;
1146 change_nexthops(fi) { 1147 change_nexthops(fi) {
1147 if (!(nh->nh_flags&RTNH_F_DEAD)) { 1148 if (!(nexthop_nh->nh_flags&RTNH_F_DEAD)) {
1148 alive++; 1149 alive++;
1149 continue; 1150 continue;
1150 } 1151 }
1151 if (nh->nh_dev == NULL || !(nh->nh_dev->flags&IFF_UP)) 1152 if (nexthop_nh->nh_dev == NULL ||
1153 !(nexthop_nh->nh_dev->flags&IFF_UP))
1152 continue; 1154 continue;
1153 if (nh->nh_dev != dev || !__in_dev_get_rtnl(dev)) 1155 if (nexthop_nh->nh_dev != dev ||
1156 !__in_dev_get_rtnl(dev))
1154 continue; 1157 continue;
1155 alive++; 1158 alive++;
1156 spin_lock_bh(&fib_multipath_lock); 1159 spin_lock_bh(&fib_multipath_lock);
1157 nh->nh_power = 0; 1160 nexthop_nh->nh_power = 0;
1158 nh->nh_flags &= ~RTNH_F_DEAD; 1161 nexthop_nh->nh_flags &= ~RTNH_F_DEAD;
1159 spin_unlock_bh(&fib_multipath_lock); 1162 spin_unlock_bh(&fib_multipath_lock);
1160 } endfor_nexthops(fi) 1163 } endfor_nexthops(fi)
1161 1164
@@ -1182,9 +1185,9 @@ void fib_select_multipath(const struct flowi *flp, struct fib_result *res)
1182 if (fi->fib_power <= 0) { 1185 if (fi->fib_power <= 0) {
1183 int power = 0; 1186 int power = 0;
1184 change_nexthops(fi) { 1187 change_nexthops(fi) {
1185 if (!(nh->nh_flags&RTNH_F_DEAD)) { 1188 if (!(nexthop_nh->nh_flags&RTNH_F_DEAD)) {
1186 power += nh->nh_weight; 1189 power += nexthop_nh->nh_weight;
1187 nh->nh_power = nh->nh_weight; 1190 nexthop_nh->nh_power = nexthop_nh->nh_weight;
1188 } 1191 }
1189 } endfor_nexthops(fi); 1192 } endfor_nexthops(fi);
1190 fi->fib_power = power; 1193 fi->fib_power = power;
@@ -1204,9 +1207,10 @@ void fib_select_multipath(const struct flowi *flp, struct fib_result *res)
1204 w = jiffies % fi->fib_power; 1207 w = jiffies % fi->fib_power;
1205 1208
1206 change_nexthops(fi) { 1209 change_nexthops(fi) {
1207 if (!(nh->nh_flags&RTNH_F_DEAD) && nh->nh_power) { 1210 if (!(nexthop_nh->nh_flags&RTNH_F_DEAD) &&
1208 if ((w -= nh->nh_power) <= 0) { 1211 nexthop_nh->nh_power) {
1209 nh->nh_power--; 1212 if ((w -= nexthop_nh->nh_power) <= 0) {
1213 nexthop_nh->nh_power--;
1210 fi->fib_power--; 1214 fi->fib_power--;
1211 res->nh_sel = nhsel; 1215 res->nh_sel = nhsel;
1212 spin_unlock_bh(&fib_multipath_lock); 1216 spin_unlock_bh(&fib_multipath_lock);
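
fib_semantics.c keeps the for_nexthops()/change_nexthops() looping macros but renames the mutable cursor to nexthop_nh so it can no longer shadow an outer nh variable. The macro idiom itself, a brace-opening macro that declares the cursor and is closed by endfor_nexthops(), is easy to reproduce outside the kernel; the sketch below is simplified to a plain int array and drops the multipath #ifdef variants.

#include <stdio.h>

struct fib_info_demo {
	int fib_nhs;		/* number of next hops */
	int fib_nh[4];		/* stand-in for the embedded fib_nh array */
};

/* brace-opening loop macros in the style of fib_semantics.c; the block
 * they introduce is closed by endfor_nexthops() */
#define change_nexthops(fi) { int nhsel; int *nexthop_nh;		\
	for (nhsel = 0, nexthop_nh = (fi)->fib_nh;			\
	     nhsel < (fi)->fib_nhs; nexthop_nh++, nhsel++)

#define endfor_nexthops(fi) }

int main(void)
{
	struct fib_info_demo fi = { .fib_nhs = 3, .fib_nh = { 5, 0, 7 } };
	int dead = 0;

	change_nexthops(&fi) {
		if (*nexthop_nh == 0)	/* pretend 0 means RTNH_F_DEAD */
			dead++;
	} endfor_nexthops(&fi);

	printf("dead next hops: %d\n", dead);	/* 1 */
	return 0;
}

Renaming the cursor, rather than the macro structure, is what forces every loop body in the hunks above to be touched.
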
diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c
index fe11f60ce41..4b4c2bcd15d 100644
--- a/net/ipv4/icmp.c
+++ b/net/ipv4/icmp.c
@@ -114,7 +114,7 @@ struct icmp_bxm {
114/* An array of errno for error messages from dest unreach. */ 114/* An array of errno for error messages from dest unreach. */
115/* RFC 1122: 3.2.2.1 States that NET_UNREACH, HOST_UNREACH and SR_FAILED MUST be considered 'transient errs'. */ 115/* RFC 1122: 3.2.2.1 States that NET_UNREACH, HOST_UNREACH and SR_FAILED MUST be considered 'transient errs'. */
116 116
117struct icmp_err icmp_err_convert[] = { 117const struct icmp_err icmp_err_convert[] = {
118 { 118 {
119 .errno = ENETUNREACH, /* ICMP_NET_UNREACH */ 119 .errno = ENETUNREACH, /* ICMP_NET_UNREACH */
120 .fatal = 0, 120 .fatal = 0,
diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
index 76c08402c93..d2836399874 100644
--- a/net/ipv4/igmp.c
+++ b/net/ipv4/igmp.c
@@ -1799,7 +1799,7 @@ int ip_mc_join_group(struct sock *sk , struct ip_mreqn *imr)
1799 iml->next = inet->mc_list; 1799 iml->next = inet->mc_list;
1800 iml->sflist = NULL; 1800 iml->sflist = NULL;
1801 iml->sfmode = MCAST_EXCLUDE; 1801 iml->sfmode = MCAST_EXCLUDE;
1802 inet->mc_list = iml; 1802 rcu_assign_pointer(inet->mc_list, iml);
1803 ip_mc_inc_group(in_dev, addr); 1803 ip_mc_inc_group(in_dev, addr);
1804 err = 0; 1804 err = 0;
1805done: 1805done:
@@ -1807,24 +1807,46 @@ done:
1807 return err; 1807 return err;
1808} 1808}
1809 1809
1810static void ip_sf_socklist_reclaim(struct rcu_head *rp)
1811{
1812 struct ip_sf_socklist *psf;
1813
1814 psf = container_of(rp, struct ip_sf_socklist, rcu);
1815 /* sk_omem_alloc should have been decreased by the caller*/
1816 kfree(psf);
1817}
1818
1810static int ip_mc_leave_src(struct sock *sk, struct ip_mc_socklist *iml, 1819static int ip_mc_leave_src(struct sock *sk, struct ip_mc_socklist *iml,
1811 struct in_device *in_dev) 1820 struct in_device *in_dev)
1812{ 1821{
1822 struct ip_sf_socklist *psf = iml->sflist;
1813 int err; 1823 int err;
1814 1824
1815 if (iml->sflist == NULL) { 1825 if (psf == NULL) {
1816 /* any-source empty exclude case */ 1826 /* any-source empty exclude case */
1817 return ip_mc_del_src(in_dev, &iml->multi.imr_multiaddr.s_addr, 1827 return ip_mc_del_src(in_dev, &iml->multi.imr_multiaddr.s_addr,
1818 iml->sfmode, 0, NULL, 0); 1828 iml->sfmode, 0, NULL, 0);
1819 } 1829 }
1820 err = ip_mc_del_src(in_dev, &iml->multi.imr_multiaddr.s_addr, 1830 err = ip_mc_del_src(in_dev, &iml->multi.imr_multiaddr.s_addr,
1821 iml->sfmode, iml->sflist->sl_count, 1831 iml->sfmode, psf->sl_count, psf->sl_addr, 0);
1822 iml->sflist->sl_addr, 0); 1832 rcu_assign_pointer(iml->sflist, NULL);
1823 sock_kfree_s(sk, iml->sflist, IP_SFLSIZE(iml->sflist->sl_max)); 1833 /* decrease mem now to avoid the memleak warning */
1824 iml->sflist = NULL; 1834 atomic_sub(IP_SFLSIZE(psf->sl_max), &sk->sk_omem_alloc);
1835 call_rcu(&psf->rcu, ip_sf_socklist_reclaim);
1825 return err; 1836 return err;
1826} 1837}
1827 1838
1839
1840static void ip_mc_socklist_reclaim(struct rcu_head *rp)
1841{
1842 struct ip_mc_socklist *iml;
1843
1844 iml = container_of(rp, struct ip_mc_socklist, rcu);
1845 /* sk_omem_alloc should have been decreased by the caller*/
1846 kfree(iml);
1847}
1848
1849
1828/* 1850/*
1829 * Ask a socket to leave a group. 1851 * Ask a socket to leave a group.
1830 */ 1852 */
@@ -1854,12 +1876,14 @@ int ip_mc_leave_group(struct sock *sk, struct ip_mreqn *imr)
1854 1876
1855 (void) ip_mc_leave_src(sk, iml, in_dev); 1877 (void) ip_mc_leave_src(sk, iml, in_dev);
1856 1878
1857 *imlp = iml->next; 1879 rcu_assign_pointer(*imlp, iml->next);
1858 1880
1859 if (in_dev) 1881 if (in_dev)
1860 ip_mc_dec_group(in_dev, group); 1882 ip_mc_dec_group(in_dev, group);
1861 rtnl_unlock(); 1883 rtnl_unlock();
1862 sock_kfree_s(sk, iml, sizeof(*iml)); 1884 /* decrease mem now to avoid the memleak warning */
1885 atomic_sub(sizeof(*iml), &sk->sk_omem_alloc);
1886 call_rcu(&iml->rcu, ip_mc_socklist_reclaim);
1863 return 0; 1887 return 0;
1864 } 1888 }
1865 if (!in_dev) 1889 if (!in_dev)
@@ -1974,9 +1998,12 @@ int ip_mc_source(int add, int omode, struct sock *sk, struct
1974 if (psl) { 1998 if (psl) {
1975 for (i=0; i<psl->sl_count; i++) 1999 for (i=0; i<psl->sl_count; i++)
1976 newpsl->sl_addr[i] = psl->sl_addr[i]; 2000 newpsl->sl_addr[i] = psl->sl_addr[i];
1977 sock_kfree_s(sk, psl, IP_SFLSIZE(psl->sl_max)); 2001 /* decrease mem now to avoid the memleak warning */
2002 atomic_sub(IP_SFLSIZE(psl->sl_max), &sk->sk_omem_alloc);
2003 call_rcu(&psl->rcu, ip_sf_socklist_reclaim);
1978 } 2004 }
1979 pmc->sflist = psl = newpsl; 2005 rcu_assign_pointer(pmc->sflist, newpsl);
2006 psl = newpsl;
1980 } 2007 }
1981 rv = 1; /* > 0 for insert logic below if sl_count is 0 */ 2008 rv = 1; /* > 0 for insert logic below if sl_count is 0 */
1982 for (i=0; i<psl->sl_count; i++) { 2009 for (i=0; i<psl->sl_count; i++) {
@@ -2072,11 +2099,13 @@ int ip_mc_msfilter(struct sock *sk, struct ip_msfilter *msf, int ifindex)
2072 if (psl) { 2099 if (psl) {
2073 (void) ip_mc_del_src(in_dev, &msf->imsf_multiaddr, pmc->sfmode, 2100 (void) ip_mc_del_src(in_dev, &msf->imsf_multiaddr, pmc->sfmode,
2074 psl->sl_count, psl->sl_addr, 0); 2101 psl->sl_count, psl->sl_addr, 0);
2075 sock_kfree_s(sk, psl, IP_SFLSIZE(psl->sl_max)); 2102 /* decrease mem now to avoid the memleak warning */
2103 atomic_sub(IP_SFLSIZE(psl->sl_max), &sk->sk_omem_alloc);
2104 call_rcu(&psl->rcu, ip_sf_socklist_reclaim);
2076 } else 2105 } else
2077 (void) ip_mc_del_src(in_dev, &msf->imsf_multiaddr, pmc->sfmode, 2106 (void) ip_mc_del_src(in_dev, &msf->imsf_multiaddr, pmc->sfmode,
2078 0, NULL, 0); 2107 0, NULL, 0);
2079 pmc->sflist = newpsl; 2108 rcu_assign_pointer(pmc->sflist, newpsl);
2080 pmc->sfmode = msf->imsf_fmode; 2109 pmc->sfmode = msf->imsf_fmode;
2081 err = 0; 2110 err = 0;
2082done: 2111done:
@@ -2209,30 +2238,40 @@ int ip_mc_sf_allow(struct sock *sk, __be32 loc_addr, __be32 rmt_addr, int dif)
2209 struct ip_mc_socklist *pmc; 2238 struct ip_mc_socklist *pmc;
2210 struct ip_sf_socklist *psl; 2239 struct ip_sf_socklist *psl;
2211 int i; 2240 int i;
2241 int ret;
2212 2242
2243 ret = 1;
2213 if (!ipv4_is_multicast(loc_addr)) 2244 if (!ipv4_is_multicast(loc_addr))
2214 return 1; 2245 goto out;
2215 2246
2216 for (pmc=inet->mc_list; pmc; pmc=pmc->next) { 2247 rcu_read_lock();
2248 for (pmc=rcu_dereference(inet->mc_list); pmc; pmc=rcu_dereference(pmc->next)) {
2217 if (pmc->multi.imr_multiaddr.s_addr == loc_addr && 2249 if (pmc->multi.imr_multiaddr.s_addr == loc_addr &&
2218 pmc->multi.imr_ifindex == dif) 2250 pmc->multi.imr_ifindex == dif)
2219 break; 2251 break;
2220 } 2252 }
2253 ret = inet->mc_all;
2221 if (!pmc) 2254 if (!pmc)
2222 return inet->mc_all; 2255 goto unlock;
2223 psl = pmc->sflist; 2256 psl = pmc->sflist;
2257 ret = (pmc->sfmode == MCAST_EXCLUDE);
2224 if (!psl) 2258 if (!psl)
2225 return pmc->sfmode == MCAST_EXCLUDE; 2259 goto unlock;
2226 2260
2227 for (i=0; i<psl->sl_count; i++) { 2261 for (i=0; i<psl->sl_count; i++) {
2228 if (psl->sl_addr[i] == rmt_addr) 2262 if (psl->sl_addr[i] == rmt_addr)
2229 break; 2263 break;
2230 } 2264 }
2265 ret = 0;
2231 if (pmc->sfmode == MCAST_INCLUDE && i >= psl->sl_count) 2266 if (pmc->sfmode == MCAST_INCLUDE && i >= psl->sl_count)
2232 return 0; 2267 goto unlock;
2233 if (pmc->sfmode == MCAST_EXCLUDE && i < psl->sl_count) 2268 if (pmc->sfmode == MCAST_EXCLUDE && i < psl->sl_count)
2234 return 0; 2269 goto unlock;
2235 return 1; 2270 ret = 1;
2271unlock:
2272 rcu_read_unlock();
2273out:
2274 return ret;
2236} 2275}
2237 2276
2238/* 2277/*
@@ -2251,7 +2290,7 @@ void ip_mc_drop_socket(struct sock *sk)
2251 rtnl_lock(); 2290 rtnl_lock();
2252 while ((iml = inet->mc_list) != NULL) { 2291 while ((iml = inet->mc_list) != NULL) {
2253 struct in_device *in_dev; 2292 struct in_device *in_dev;
2254 inet->mc_list = iml->next; 2293 rcu_assign_pointer(inet->mc_list, iml->next);
2255 2294
2256 in_dev = inetdev_by_index(net, iml->multi.imr_ifindex); 2295 in_dev = inetdev_by_index(net, iml->multi.imr_ifindex);
2257 (void) ip_mc_leave_src(sk, iml, in_dev); 2296 (void) ip_mc_leave_src(sk, iml, in_dev);
@@ -2259,7 +2298,9 @@ void ip_mc_drop_socket(struct sock *sk)
2259 ip_mc_dec_group(in_dev, iml->multi.imr_multiaddr.s_addr); 2298 ip_mc_dec_group(in_dev, iml->multi.imr_multiaddr.s_addr);
2260 in_dev_put(in_dev); 2299 in_dev_put(in_dev);
2261 } 2300 }
2262 sock_kfree_s(sk, iml, sizeof(*iml)); 2301 /* decrease mem now to avoid the memleak warning */
2302 atomic_sub(sizeof(*iml), &sk->sk_omem_alloc);
2303 call_rcu(&iml->rcu, ip_mc_socklist_reclaim);
2263 } 2304 }
2264 rtnl_unlock(); 2305 rtnl_unlock();
2265} 2306}
@@ -2603,7 +2644,7 @@ static const struct file_operations igmp_mcf_seq_fops = {
2603 .release = seq_release_net, 2644 .release = seq_release_net,
2604}; 2645};
2605 2646
2606static int igmp_net_init(struct net *net) 2647static int __net_init igmp_net_init(struct net *net)
2607{ 2648{
2608 struct proc_dir_entry *pde; 2649 struct proc_dir_entry *pde;
2609 2650
@@ -2621,7 +2662,7 @@ out_igmp:
2621 return -ENOMEM; 2662 return -ENOMEM;
2622} 2663}
2623 2664
2624static void igmp_net_exit(struct net *net) 2665static void __net_exit igmp_net_exit(struct net *net)
2625{ 2666{
2626 proc_net_remove(net, "mcfilter"); 2667 proc_net_remove(net, "mcfilter");
2627 proc_net_remove(net, "igmp"); 2668 proc_net_remove(net, "igmp");
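
The igmp.c hunks above follow one recurring pattern: a list that is walked in the packet receive path (ip_mc_sf_allow) is converted to RCU. The writer unpublishes an entry with rcu_assign_pointer(), drops the socket memory accounting immediately so sk_omem_alloc is consistent when the socket is torn down, and defers the kfree() to a call_rcu() callback; readers then traverse under rcu_read_lock() with rcu_dereference(). The sketch below restates that pattern in isolation — struct item, item_del() and item_present() are hypothetical names for illustration, not code from this diff.

/* Illustrative unpublish + call_rcu pattern, mirroring the igmp.c changes. */
#include <linux/kernel.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/types.h>

struct item {
	struct item *next;
	int key;
	struct rcu_head rcu;
};

static struct item *item_list;
static DEFINE_SPINLOCK(item_lock);	/* serializes writers only */

static void item_reclaim(struct rcu_head *rp)
{
	/* Runs after a grace period: no reader can still hold a
	 * pointer obtained via rcu_dereference(). */
	kfree(container_of(rp, struct item, rcu));
}

/* Writer: unlink the first matching entry, free it lazily. */
static void item_del(int key)
{
	struct item **pp = &item_list;
	struct item *p;

	spin_lock(&item_lock);
	for (p = *pp; p; pp = &p->next, p = *pp) {
		if (p->key == key) {
			rcu_assign_pointer(*pp, p->next);
			call_rcu(&p->rcu, item_reclaim);
			break;
		}
	}
	spin_unlock(&item_lock);
}

/* Reader: lockless lookup, as in ip_mc_sf_allow() above. */
static bool item_present(int key)
{
	struct item *p;
	bool found = false;

	rcu_read_lock();
	for (p = rcu_dereference(item_list); p; p = rcu_dereference(p->next))
		if (p->key == key) {
			found = true;
			break;
		}
	rcu_read_unlock();
	return found;
}
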
diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
index ee16475f8fc..8da6429269d 100644
--- a/net/ipv4/inet_connection_sock.c
+++ b/net/ipv4/inet_connection_sock.c
@@ -529,6 +529,8 @@ void inet_csk_reqsk_queue_prune(struct sock *parent,
529 syn_ack_recalc(req, thresh, max_retries, 529 syn_ack_recalc(req, thresh, max_retries,
530 queue->rskq_defer_accept, 530 queue->rskq_defer_accept,
531 &expire, &resend); 531 &expire, &resend);
532 if (req->rsk_ops->syn_ack_timeout)
533 req->rsk_ops->syn_ack_timeout(parent, req);
532 if (!expire && 534 if (!expire &&
533 (!resend || 535 (!resend ||
534 !req->rsk_ops->rtx_syn_ack(parent, req, NULL) || 536 !req->rsk_ops->rtx_syn_ack(parent, req, NULL) ||
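
This inet_connection_sock.c hunk adds an optional per-protocol hook: when the SYN-ACK retransmit timer fires for a pending request, the core calls rsk_ops->syn_ack_timeout() if the protocol provides one. TCP wires it up later in this diff (tcp_syn_ack_timeout() in tcp_timer.c bumps LINUX_MIB_TCPTIMEOUTS, and both tcp_request_sock_ops and tcp6_request_sock_ops set .syn_ack_timeout). A minimal sketch of how a protocol supplies the hook — the ops table and function names here are hypothetical, only the field and call signature come from this diff:

#include <net/request_sock.h>
#include <net/tcp.h>

/* Hypothetical accounting hook; the real TCP version just
 * increments LINUX_MIB_TCPTIMEOUTS (see tcp_timer.c below). */
static void example_syn_ack_timeout(struct sock *parent,
				    struct request_sock *req)
{
	NET_INC_STATS_BH(sock_net(parent), LINUX_MIB_TCPTIMEOUTS);
}

/* Only the fields relevant here are shown; a real request_sock_ops
 * must also provide rtx_syn_ack, send_ack, send_reset, destructor. */
static struct request_sock_ops example_request_sock_ops __read_mostly = {
	.family		 = PF_INET,
	.obj_size	 = sizeof(struct tcp_request_sock),
	.syn_ack_timeout = example_syn_ack_timeout,
};

Because the core only calls the hook when it is non-NULL, existing protocols that leave the field unset keep their old behaviour.
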
diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
index 86964b353c3..b59430bc041 100644
--- a/net/ipv4/ip_fragment.c
+++ b/net/ipv4/ip_fragment.c
@@ -32,6 +32,8 @@
32#include <linux/netdevice.h> 32#include <linux/netdevice.h>
33#include <linux/jhash.h> 33#include <linux/jhash.h>
34#include <linux/random.h> 34#include <linux/random.h>
35#include <net/route.h>
36#include <net/dst.h>
35#include <net/sock.h> 37#include <net/sock.h>
36#include <net/ip.h> 38#include <net/ip.h>
37#include <net/icmp.h> 39#include <net/icmp.h>
@@ -205,11 +207,34 @@ static void ip_expire(unsigned long arg)
205 if ((qp->q.last_in & INET_FRAG_FIRST_IN) && qp->q.fragments != NULL) { 207 if ((qp->q.last_in & INET_FRAG_FIRST_IN) && qp->q.fragments != NULL) {
206 struct sk_buff *head = qp->q.fragments; 208 struct sk_buff *head = qp->q.fragments;
207 209
208 /* Send an ICMP "Fragment Reassembly Timeout" message. */
209 rcu_read_lock(); 210 rcu_read_lock();
210 head->dev = dev_get_by_index_rcu(net, qp->iif); 211 head->dev = dev_get_by_index_rcu(net, qp->iif);
211 if (head->dev) 212 if (!head->dev)
212 icmp_send(head, ICMP_TIME_EXCEEDED, ICMP_EXC_FRAGTIME, 0); 213 goto out_rcu_unlock;
214
215 /*
216	 * Only search the routing table for the head fragment
217	 * when the defrag timeout fires at the PRE_ROUTING hook.
218 */
219 if (qp->user == IP_DEFRAG_CONNTRACK_IN && !skb_dst(head)) {
220 const struct iphdr *iph = ip_hdr(head);
221 int err = ip_route_input(head, iph->daddr, iph->saddr,
222 iph->tos, head->dev);
223 if (unlikely(err))
224 goto out_rcu_unlock;
225
226 /*
227 * Only an end host needs to send an ICMP
228 * "Fragment Reassembly Timeout" message, per RFC792.
229 */
230 if (skb_rtable(head)->rt_type != RTN_LOCAL)
231 goto out_rcu_unlock;
232
233 }
234
235 /* Send an ICMP "Fragment Reassembly Timeout" message. */
236 icmp_send(head, ICMP_TIME_EXCEEDED, ICMP_EXC_FRAGTIME, 0);
237out_rcu_unlock:
213 rcu_read_unlock(); 238 rcu_read_unlock();
214 } 239 }
215out: 240out:
@@ -646,7 +671,7 @@ static struct ctl_table ip4_frags_ctl_table[] = {
646 { } 671 { }
647}; 672};
648 673
649static int ip4_frags_ns_ctl_register(struct net *net) 674static int __net_init ip4_frags_ns_ctl_register(struct net *net)
650{ 675{
651 struct ctl_table *table; 676 struct ctl_table *table;
652 struct ctl_table_header *hdr; 677 struct ctl_table_header *hdr;
@@ -676,7 +701,7 @@ err_alloc:
676 return -ENOMEM; 701 return -ENOMEM;
677} 702}
678 703
679static void ip4_frags_ns_ctl_unregister(struct net *net) 704static void __net_exit ip4_frags_ns_ctl_unregister(struct net *net)
680{ 705{
681 struct ctl_table *table; 706 struct ctl_table *table;
682 707
@@ -704,7 +729,7 @@ static inline void ip4_frags_ctl_register(void)
704} 729}
705#endif 730#endif
706 731
707static int ipv4_frags_init_net(struct net *net) 732static int __net_init ipv4_frags_init_net(struct net *net)
708{ 733{
709 /* 734 /*
710 * Fragment cache limits. We will commit 256K at one time. Should we 735 * Fragment cache limits. We will commit 256K at one time. Should we
@@ -726,7 +751,7 @@ static int ipv4_frags_init_net(struct net *net)
726 return ip4_frags_ns_ctl_register(net); 751 return ip4_frags_ns_ctl_register(net);
727} 752}
728 753
729static void ipv4_frags_exit_net(struct net *net) 754static void __net_exit ipv4_frags_exit_net(struct net *net)
730{ 755{
731 ip4_frags_ns_ctl_unregister(net); 756 ip4_frags_ns_ctl_unregister(net);
732 inet_frags_exit_net(&net->ipv4.frags, &ip4_frags); 757 inet_frags_exit_net(&net->ipv4.frags, &ip4_frags);
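
The ip_expire() change above narrows when the ICMP "Fragment Reassembly Timeout" error is sent: if reassembly was started from the conntrack PRE_ROUTING defrag hook, the head fragment has no route yet, so one is looked up and the error is only generated when the packet was destined for this host (RTN_LOCAL), per RFC 792's rule that only the end host reports the timeout. The predicate below condenses that decision; it is a restatement of the logic shown above for readability, not a function that exists in the kernel.

#include <linux/ip.h>
#include <linux/skbuff.h>
#include <linux/types.h>
#include <net/ip.h>
#include <net/route.h>

/* Hypothetical helper: should ICMP_TIME_EXCEEDED/ICMP_EXC_FRAGTIME be
 * sent for the queue's head fragment? */
static bool frag_timeout_wants_icmp(struct sk_buff *head, u32 user)
{
	const struct iphdr *iph = ip_hdr(head);

	/* Anywhere other than the conntrack PRE_ROUTING hook the head
	 * already carries routing state; keep the old behaviour. */
	if (user != IP_DEFRAG_CONNTRACK_IN || skb_dst(head))
		return true;

	/* PRE_ROUTING defrag: route the head fragment first ... */
	if (ip_route_input(head, iph->daddr, iph->saddr,
			   iph->tos, head->dev))
		return false;

	/* ... and only an end host (local delivery) reports the
	 * timeout, per RFC 792. */
	return skb_rtable(head)->rt_type == RTN_LOCAL;
}
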
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
index f36ce156cac..7631b20490f 100644
--- a/net/ipv4/ip_gre.c
+++ b/net/ipv4/ip_gre.c
@@ -1307,7 +1307,7 @@ static void ipgre_destroy_tunnels(struct ipgre_net *ign, struct list_head *head)
1307 } 1307 }
1308} 1308}
1309 1309
1310static int ipgre_init_net(struct net *net) 1310static int __net_init ipgre_init_net(struct net *net)
1311{ 1311{
1312 struct ipgre_net *ign = net_generic(net, ipgre_net_id); 1312 struct ipgre_net *ign = net_generic(net, ipgre_net_id);
1313 int err; 1313 int err;
@@ -1334,7 +1334,7 @@ err_alloc_dev:
1334 return err; 1334 return err;
1335} 1335}
1336 1336
1337static void ipgre_exit_net(struct net *net) 1337static void __net_exit ipgre_exit_net(struct net *net)
1338{ 1338{
1339 struct ipgre_net *ign; 1339 struct ipgre_net *ign;
1340 LIST_HEAD(list); 1340 LIST_HEAD(list);
diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
index cafad9baff0..644dc43a55d 100644
--- a/net/ipv4/ip_sockglue.c
+++ b/net/ipv4/ip_sockglue.c
@@ -451,7 +451,8 @@ static int do_ip_setsockopt(struct sock *sk, int level,
451 (1<<IP_TTL) | (1<<IP_HDRINCL) | 451 (1<<IP_TTL) | (1<<IP_HDRINCL) |
452 (1<<IP_MTU_DISCOVER) | (1<<IP_RECVERR) | 452 (1<<IP_MTU_DISCOVER) | (1<<IP_RECVERR) |
453 (1<<IP_ROUTER_ALERT) | (1<<IP_FREEBIND) | 453 (1<<IP_ROUTER_ALERT) | (1<<IP_FREEBIND) |
454 (1<<IP_PASSSEC) | (1<<IP_TRANSPARENT))) || 454 (1<<IP_PASSSEC) | (1<<IP_TRANSPARENT) |
455 (1<<IP_MINTTL))) ||
455 optname == IP_MULTICAST_TTL || 456 optname == IP_MULTICAST_TTL ||
456 optname == IP_MULTICAST_ALL || 457 optname == IP_MULTICAST_ALL ||
457 optname == IP_MULTICAST_LOOP || 458 optname == IP_MULTICAST_LOOP ||
@@ -936,6 +937,14 @@ mc_msf_out:
936 inet->transparent = !!val; 937 inet->transparent = !!val;
937 break; 938 break;
938 939
940 case IP_MINTTL:
941 if (optlen < 1)
942 goto e_inval;
943 if (val < 0 || val > 255)
944 goto e_inval;
945 inet->min_ttl = val;
946 break;
947
939 default: 948 default:
940 err = -ENOPROTOOPT; 949 err = -ENOPROTOOPT;
941 break; 950 break;
@@ -1198,6 +1207,9 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname,
1198 case IP_TRANSPARENT: 1207 case IP_TRANSPARENT:
1199 val = inet->transparent; 1208 val = inet->transparent;
1200 break; 1209 break;
1210 case IP_MINTTL:
1211 val = inet->min_ttl;
1212 break;
1201 default: 1213 default:
1202 release_sock(sk); 1214 release_sock(sk);
1203 return -ENOPROTOOPT; 1215 return -ENOPROTOOPT;
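
IP_MINTTL is the userspace-visible half of this series' GTSM-style protection: a socket that sets it has the stack drop inbound packets whose TTL is below the threshold (the TCP enforcement of inet_sk(sk)->min_ttl appears in the tcp_ipv4.c hunk further down). A small, self-contained userspace sketch follows; the listen port and the fallback #define value are assumptions for illustration, and the option is simply reported as unsupported on kernels without this patch.

/* Require that peers be directly connected (TTL 255, as in RFC 5082
 * GTSM) before the kernel delivers their packets to this socket. */
#include <arpa/inet.h>
#include <netinet/in.h>
#include <netinet/ip.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

#ifndef IP_MINTTL
#define IP_MINTTL 21	/* value from linux/in.h; assumed if libc headers are older */
#endif

int main(void)
{
	int fd = socket(AF_INET, SOCK_STREAM, 0);
	int minttl = 255;	/* drop anything that has crossed a router */
	struct sockaddr_in addr;

	if (fd < 0) {
		perror("socket");
		return 1;
	}
	if (setsockopt(fd, IPPROTO_IP, IP_MINTTL, &minttl, sizeof(minttl)) < 0)
		perror("setsockopt(IP_MINTTL)");	/* ENOPROTOOPT/EINVAL on old kernels */

	memset(&addr, 0, sizeof(addr));
	addr.sin_family = AF_INET;
	addr.sin_port = htons(17900);		/* arbitrary example port */
	addr.sin_addr.s_addr = htonl(INADDR_ANY);
	if (bind(fd, (struct sockaddr *)&addr, sizeof(addr)) < 0 ||
	    listen(fd, 8) < 0) {
		perror("bind/listen");
		close(fd);
		return 1;
	}
	/* SYNs from hosts more than one hop away are now dropped by the
	 * kernel before they ever reach accept(). */
	close(fd);
	return 0;
}

For this to be useful the legitimate peer must send with IP_TTL set to 255, so that only packets which never crossed a router pass the check.
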
diff --git a/net/ipv4/ipcomp.c b/net/ipv4/ipcomp.c
index 38fbf04150a..b55a0c3df82 100644
--- a/net/ipv4/ipcomp.c
+++ b/net/ipv4/ipcomp.c
@@ -25,6 +25,7 @@
25 25
26static void ipcomp4_err(struct sk_buff *skb, u32 info) 26static void ipcomp4_err(struct sk_buff *skb, u32 info)
27{ 27{
28 struct net *net = dev_net(skb->dev);
28 __be32 spi; 29 __be32 spi;
29 struct iphdr *iph = (struct iphdr *)skb->data; 30 struct iphdr *iph = (struct iphdr *)skb->data;
30 struct ip_comp_hdr *ipch = (struct ip_comp_hdr *)(skb->data+(iph->ihl<<2)); 31 struct ip_comp_hdr *ipch = (struct ip_comp_hdr *)(skb->data+(iph->ihl<<2));
@@ -35,7 +36,7 @@ static void ipcomp4_err(struct sk_buff *skb, u32 info)
35 return; 36 return;
36 37
37 spi = htonl(ntohs(ipch->cpi)); 38 spi = htonl(ntohs(ipch->cpi));
38 x = xfrm_state_lookup(&init_net, (xfrm_address_t *)&iph->daddr, 39 x = xfrm_state_lookup(net, (xfrm_address_t *)&iph->daddr,
39 spi, IPPROTO_COMP, AF_INET); 40 spi, IPPROTO_COMP, AF_INET);
40 if (!x) 41 if (!x)
41 return; 42 return;
@@ -47,9 +48,10 @@ static void ipcomp4_err(struct sk_buff *skb, u32 info)
47/* We always hold one tunnel user reference to indicate a tunnel */ 48/* We always hold one tunnel user reference to indicate a tunnel */
48static struct xfrm_state *ipcomp_tunnel_create(struct xfrm_state *x) 49static struct xfrm_state *ipcomp_tunnel_create(struct xfrm_state *x)
49{ 50{
51 struct net *net = xs_net(x);
50 struct xfrm_state *t; 52 struct xfrm_state *t;
51 53
52 t = xfrm_state_alloc(&init_net); 54 t = xfrm_state_alloc(net);
53 if (t == NULL) 55 if (t == NULL)
54 goto out; 56 goto out;
55 57
@@ -82,10 +84,11 @@ error:
82 */ 84 */
83static int ipcomp_tunnel_attach(struct xfrm_state *x) 85static int ipcomp_tunnel_attach(struct xfrm_state *x)
84{ 86{
87 struct net *net = xs_net(x);
85 int err = 0; 88 int err = 0;
86 struct xfrm_state *t; 89 struct xfrm_state *t;
87 90
88 t = xfrm_state_lookup(&init_net, (xfrm_address_t *)&x->id.daddr.a4, 91 t = xfrm_state_lookup(net, (xfrm_address_t *)&x->id.daddr.a4,
89 x->props.saddr.a4, IPPROTO_IPIP, AF_INET); 92 x->props.saddr.a4, IPPROTO_IPIP, AF_INET);
90 if (!t) { 93 if (!t) {
91 t = ipcomp_tunnel_create(x); 94 t = ipcomp_tunnel_create(x);
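
The ipcomp.c changes (and the matching ipcomp6.c hunk later in this diff) replace hard-coded &init_net with the namespace that actually owns the object: dev_net(skb->dev) in the error handler and xs_net(x) for states, so IPComp tunnel SAs are created and looked up in the right namespace. As a pattern, any xfrm helper that previously assumed init_net converts like the sketch below; example_lookup_peer() is a made-up name, the call signature is the one used in this hunk.

#include <net/net_namespace.h>
#include <net/xfrm.h>

/* Hypothetical helper showing the conversion pattern: derive the
 * namespace from the object at hand instead of using &init_net. */
static struct xfrm_state *example_lookup_peer(struct xfrm_state *x,
					      __be32 spi, u8 proto)
{
	struct net *net = xs_net(x);	/* was: &init_net */

	return xfrm_state_lookup(net, (xfrm_address_t *)&x->id.daddr,
				 spi, proto, AF_INET);
}
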
diff --git a/net/ipv4/ipip.c b/net/ipv4/ipip.c
index eda04fed337..95db732e542 100644
--- a/net/ipv4/ipip.c
+++ b/net/ipv4/ipip.c
@@ -130,7 +130,6 @@ struct ipip_net {
130 struct net_device *fb_tunnel_dev; 130 struct net_device *fb_tunnel_dev;
131}; 131};
132 132
133static void ipip_fb_tunnel_init(struct net_device *dev);
134static void ipip_tunnel_init(struct net_device *dev); 133static void ipip_tunnel_init(struct net_device *dev);
135static void ipip_tunnel_setup(struct net_device *dev); 134static void ipip_tunnel_setup(struct net_device *dev);
136 135
@@ -730,7 +729,7 @@ static void ipip_tunnel_init(struct net_device *dev)
730 ipip_tunnel_bind_dev(dev); 729 ipip_tunnel_bind_dev(dev);
731} 730}
732 731
733static void ipip_fb_tunnel_init(struct net_device *dev) 732static void __net_init ipip_fb_tunnel_init(struct net_device *dev)
734{ 733{
735 struct ip_tunnel *tunnel = netdev_priv(dev); 734 struct ip_tunnel *tunnel = netdev_priv(dev);
736 struct iphdr *iph = &tunnel->parms.iph; 735 struct iphdr *iph = &tunnel->parms.iph;
@@ -773,7 +772,7 @@ static void ipip_destroy_tunnels(struct ipip_net *ipn, struct list_head *head)
773 } 772 }
774} 773}
775 774
776static int ipip_init_net(struct net *net) 775static int __net_init ipip_init_net(struct net *net)
777{ 776{
778 struct ipip_net *ipn = net_generic(net, ipip_net_id); 777 struct ipip_net *ipn = net_generic(net, ipip_net_id);
779 int err; 778 int err;
@@ -806,7 +805,7 @@ err_alloc_dev:
806 return err; 805 return err;
807} 806}
808 807
809static void ipip_exit_net(struct net *net) 808static void __net_exit ipip_exit_net(struct net *net)
810{ 809{
811 struct ipip_net *ipn = net_generic(net, ipip_net_id); 810 struct ipip_net *ipn = net_generic(net, ipip_net_id);
812 LIST_HEAD(list); 811 LIST_HEAD(list);
diff --git a/net/ipv4/proc.c b/net/ipv4/proc.c
index f25542c48b7..1b09a6dde7c 100644
--- a/net/ipv4/proc.c
+++ b/net/ipv4/proc.c
@@ -127,8 +127,8 @@ static const struct snmp_mib snmp4_ipextstats_list[] = {
127 SNMP_MIB_SENTINEL 127 SNMP_MIB_SENTINEL
128}; 128};
129 129
130static struct { 130static const struct {
131 char *name; 131 const char *name;
132 int index; 132 int index;
133} icmpmibmap[] = { 133} icmpmibmap[] = {
134 { "DestUnreachs", ICMP_DEST_UNREACH }, 134 { "DestUnreachs", ICMP_DEST_UNREACH },
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index d62b05d3338..b16dfadbe6d 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -1990,8 +1990,13 @@ static int __mkroute_input(struct sk_buff *skb,
1990 if (skb->protocol != htons(ETH_P_IP)) { 1990 if (skb->protocol != htons(ETH_P_IP)) {
1991 /* Not IP (i.e. ARP). Do not create route, if it is 1991 /* Not IP (i.e. ARP). Do not create route, if it is
1992 * invalid for proxy arp. DNAT routes are always valid. 1992 * invalid for proxy arp. DNAT routes are always valid.
1993 *
1994	 * The proxy ARP feature has been extended to allow ARP
1995	 * replies back out the same interface, to support
1996	 * private VLAN switch technologies. See arp.c.
1993 */ 1997 */
1994 if (out_dev == in_dev) { 1998 if (out_dev == in_dev &&
1999 IN_DEV_PROXY_ARP_PVLAN(in_dev) == 0) {
1995 err = -EINVAL; 2000 err = -EINVAL;
1996 goto cleanup; 2001 goto cleanup;
1997 } 2002 }
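
The route.c hunk relaxes the input-route sanity check so a hairpinned proxy-ARP reply (same ingress and egress device) is no longer rejected outright, but only when the new per-device proxy-ARP-PVLAN setting read by IN_DEV_PROXY_ARP_PVLAN() is enabled. That setting is a per-interface sysctl; the file name and /proc path in the sketch below are assumptions inferred from the macro name, since the devinet.c side of the change is not part of this excerpt.

/* Illustrative only: enable the (assumed) per-device proxy_arp_pvlan
 * sysctl for eth0. Path and file name are assumptions. */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	const char *path = "/proc/sys/net/ipv4/conf/eth0/proxy_arp_pvlan";
	int fd = open(path, O_WRONLY);

	if (fd < 0) {
		perror(path);	/* kernel too old, or the name differs */
		return 1;
	}
	if (write(fd, "1\n", 2) != 2)
		perror("write");
	close(fd);
	return 0;
}
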
diff --git a/net/ipv4/syncookies.c b/net/ipv4/syncookies.c
index 66fd80ef247..5c24db4a3c9 100644
--- a/net/ipv4/syncookies.c
+++ b/net/ipv4/syncookies.c
@@ -358,7 +358,8 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb,
358 358
359 tcp_select_initial_window(tcp_full_space(sk), req->mss, 359 tcp_select_initial_window(tcp_full_space(sk), req->mss,
360 &req->rcv_wnd, &req->window_clamp, 360 &req->rcv_wnd, &req->window_clamp,
361 ireq->wscale_ok, &rcv_wscale); 361 ireq->wscale_ok, &rcv_wscale,
362 dst_metric(&rt->u.dst, RTAX_INITRWND));
362 363
363 ireq->rcv_wscale = rcv_wscale; 364 ireq->rcv_wscale = rcv_wscale;
364 365
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index b0a26bb25e2..d5d69ea8f24 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -536,8 +536,7 @@ static inline void skb_entail(struct sock *sk, struct sk_buff *skb)
536 tp->nonagle &= ~TCP_NAGLE_PUSH; 536 tp->nonagle &= ~TCP_NAGLE_PUSH;
537} 537}
538 538
539static inline void tcp_mark_urg(struct tcp_sock *tp, int flags, 539static inline void tcp_mark_urg(struct tcp_sock *tp, int flags)
540 struct sk_buff *skb)
541{ 540{
542 if (flags & MSG_OOB) 541 if (flags & MSG_OOB)
543 tp->snd_up = tp->write_seq; 542 tp->snd_up = tp->write_seq;
@@ -546,13 +545,13 @@ static inline void tcp_mark_urg(struct tcp_sock *tp, int flags,
546static inline void tcp_push(struct sock *sk, int flags, int mss_now, 545static inline void tcp_push(struct sock *sk, int flags, int mss_now,
547 int nonagle) 546 int nonagle)
548{ 547{
549 struct tcp_sock *tp = tcp_sk(sk);
550
551 if (tcp_send_head(sk)) { 548 if (tcp_send_head(sk)) {
552 struct sk_buff *skb = tcp_write_queue_tail(sk); 549 struct tcp_sock *tp = tcp_sk(sk);
550
553 if (!(flags & MSG_MORE) || forced_push(tp)) 551 if (!(flags & MSG_MORE) || forced_push(tp))
554 tcp_mark_push(tp, skb); 552 tcp_mark_push(tp, tcp_write_queue_tail(sk));
555 tcp_mark_urg(tp, flags, skb); 553
554 tcp_mark_urg(tp, flags);
556 __tcp_push_pending_frames(sk, mss_now, 555 __tcp_push_pending_frames(sk, mss_now,
557 (flags & MSG_MORE) ? TCP_NAGLE_CORK : nonagle); 556 (flags & MSG_MORE) ? TCP_NAGLE_CORK : nonagle);
558 } 557 }
@@ -877,12 +876,12 @@ ssize_t tcp_sendpage(struct socket *sock, struct page *page, int offset,
877#define TCP_PAGE(sk) (sk->sk_sndmsg_page) 876#define TCP_PAGE(sk) (sk->sk_sndmsg_page)
878#define TCP_OFF(sk) (sk->sk_sndmsg_off) 877#define TCP_OFF(sk) (sk->sk_sndmsg_off)
879 878
880static inline int select_size(struct sock *sk) 879static inline int select_size(struct sock *sk, int sg)
881{ 880{
882 struct tcp_sock *tp = tcp_sk(sk); 881 struct tcp_sock *tp = tcp_sk(sk);
883 int tmp = tp->mss_cache; 882 int tmp = tp->mss_cache;
884 883
885 if (sk->sk_route_caps & NETIF_F_SG) { 884 if (sg) {
886 if (sk_can_gso(sk)) 885 if (sk_can_gso(sk))
887 tmp = 0; 886 tmp = 0;
888 else { 887 else {
@@ -906,7 +905,7 @@ int tcp_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
906 struct sk_buff *skb; 905 struct sk_buff *skb;
907 int iovlen, flags; 906 int iovlen, flags;
908 int mss_now, size_goal; 907 int mss_now, size_goal;
909 int err, copied; 908 int sg, err, copied;
910 long timeo; 909 long timeo;
911 910
912 lock_sock(sk); 911 lock_sock(sk);
@@ -934,6 +933,8 @@ int tcp_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
934 if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN)) 933 if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN))
935 goto out_err; 934 goto out_err;
936 935
936 sg = sk->sk_route_caps & NETIF_F_SG;
937
937 while (--iovlen >= 0) { 938 while (--iovlen >= 0) {
938 int seglen = iov->iov_len; 939 int seglen = iov->iov_len;
939 unsigned char __user *from = iov->iov_base; 940 unsigned char __user *from = iov->iov_base;
@@ -959,8 +960,9 @@ new_segment:
959 if (!sk_stream_memory_free(sk)) 960 if (!sk_stream_memory_free(sk))
960 goto wait_for_sndbuf; 961 goto wait_for_sndbuf;
961 962
962 skb = sk_stream_alloc_skb(sk, select_size(sk), 963 skb = sk_stream_alloc_skb(sk,
963 sk->sk_allocation); 964 select_size(sk, sg),
965 sk->sk_allocation);
964 if (!skb) 966 if (!skb)
965 goto wait_for_memory; 967 goto wait_for_memory;
966 968
@@ -997,9 +999,7 @@ new_segment:
997 /* We can extend the last page 999 /* We can extend the last page
998 * fragment. */ 1000 * fragment. */
999 merge = 1; 1001 merge = 1;
1000 } else if (i == MAX_SKB_FRAGS || 1002 } else if (i == MAX_SKB_FRAGS || !sg) {
1001 (!i &&
1002 !(sk->sk_route_caps & NETIF_F_SG))) {
1003 /* Need to add new fragment and cannot 1003 /* Need to add new fragment and cannot
1004 * do this because interface is non-SG, 1004 * do this because interface is non-SG,
1005 * or because all the page slots are 1005 * or because all the page slots are
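
The tcp.c refactoring above is behaviour-preserving cleanup: tcp_mark_urg() never used its skb argument (urgent mode just records the current write sequence as snd_up when MSG_OOB is passed), and the NETIF_F_SG capability is now read once per tcp_sendmsg() call instead of once per segment. The user-visible trigger for the tcp_mark_urg() path is simply send(..., MSG_OOB); the loopback demo below exercises it end to end, with the port number an arbitrary assumption.

/* Send one byte of TCP urgent data over loopback and read it back
 * out-of-band. Port 17901 is an arbitrary choice for the example. */
#include <arpa/inet.h>
#include <netinet/in.h>
#include <stdio.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
	struct sockaddr_in a = { .sin_family = AF_INET,
				 .sin_port = htons(17901),
				 .sin_addr.s_addr = htonl(INADDR_LOOPBACK) };
	int ls = socket(AF_INET, SOCK_STREAM, 0);
	int c  = socket(AF_INET, SOCK_STREAM, 0);
	int s, i;
	char oob = 0;

	if (bind(ls, (struct sockaddr *)&a, sizeof(a)) || listen(ls, 1) ||
	    connect(c, (struct sockaddr *)&a, sizeof(a))) {
		perror("setup");
		return 1;
	}
	s = accept(ls, NULL, NULL);

	send(c, "x", 1, 0);		/* ordinary data */
	send(c, "!", 1, MSG_OOB);	/* marks snd_up via tcp_mark_urg() */

	/* The urgent byte may not have arrived yet; retry briefly. */
	for (i = 0; i < 100 && recv(s, &oob, 1, MSG_OOB) != 1; i++)
		usleep(1000);
	printf("urgent byte: %c\n", oob);

	close(s);
	close(c);
	close(ls);
	return 0;
}
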
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 65b8ebfd078..c3588b4fd97 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -742,9 +742,9 @@ static void tcp_v4_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
742 * This still operates on a request_sock only, not on a big 742 * This still operates on a request_sock only, not on a big
743 * socket. 743 * socket.
744 */ 744 */
745static int __tcp_v4_send_synack(struct sock *sk, struct dst_entry *dst, 745static int tcp_v4_send_synack(struct sock *sk, struct dst_entry *dst,
746 struct request_sock *req, 746 struct request_sock *req,
747 struct request_values *rvp) 747 struct request_values *rvp)
748{ 748{
749 const struct inet_request_sock *ireq = inet_rsk(req); 749 const struct inet_request_sock *ireq = inet_rsk(req);
750 int err = -1; 750 int err = -1;
@@ -775,10 +775,11 @@ static int __tcp_v4_send_synack(struct sock *sk, struct dst_entry *dst,
775 return err; 775 return err;
776} 776}
777 777
778static int tcp_v4_send_synack(struct sock *sk, struct request_sock *req, 778static int tcp_v4_rtx_synack(struct sock *sk, struct request_sock *req,
779 struct request_values *rvp) 779 struct request_values *rvp)
780{ 780{
781 return __tcp_v4_send_synack(sk, NULL, req, rvp); 781 TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_RETRANSSEGS);
782 return tcp_v4_send_synack(sk, NULL, req, rvp);
782} 783}
783 784
784/* 785/*
@@ -1192,10 +1193,11 @@ static int tcp_v4_inbound_md5_hash(struct sock *sk, struct sk_buff *skb)
1192struct request_sock_ops tcp_request_sock_ops __read_mostly = { 1193struct request_sock_ops tcp_request_sock_ops __read_mostly = {
1193 .family = PF_INET, 1194 .family = PF_INET,
1194 .obj_size = sizeof(struct tcp_request_sock), 1195 .obj_size = sizeof(struct tcp_request_sock),
1195 .rtx_syn_ack = tcp_v4_send_synack, 1196 .rtx_syn_ack = tcp_v4_rtx_synack,
1196 .send_ack = tcp_v4_reqsk_send_ack, 1197 .send_ack = tcp_v4_reqsk_send_ack,
1197 .destructor = tcp_v4_reqsk_destructor, 1198 .destructor = tcp_v4_reqsk_destructor,
1198 .send_reset = tcp_v4_send_reset, 1199 .send_reset = tcp_v4_send_reset,
1200 .syn_ack_timeout = tcp_syn_ack_timeout,
1199}; 1201};
1200 1202
1201#ifdef CONFIG_TCP_MD5SIG 1203#ifdef CONFIG_TCP_MD5SIG
@@ -1373,8 +1375,8 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
1373 } 1375 }
1374 tcp_rsk(req)->snt_isn = isn; 1376 tcp_rsk(req)->snt_isn = isn;
1375 1377
1376 if (__tcp_v4_send_synack(sk, dst, req, 1378 if (tcp_v4_send_synack(sk, dst, req,
1377 (struct request_values *)&tmp_ext) || 1379 (struct request_values *)&tmp_ext) ||
1378 want_cookie) 1380 want_cookie)
1379 goto drop_and_free; 1381 goto drop_and_free;
1380 1382
@@ -1649,6 +1651,9 @@ int tcp_v4_rcv(struct sk_buff *skb)
1649 if (!sk) 1651 if (!sk)
1650 goto no_tcp_socket; 1652 goto no_tcp_socket;
1651 1653
1654 if (iph->ttl < inet_sk(sk)->min_ttl)
1655 goto discard_and_relse;
1656
1652process: 1657process:
1653 if (sk->sk_state == TCP_TIME_WAIT) 1658 if (sk->sk_state == TCP_TIME_WAIT)
1654 goto do_time_wait; 1659 goto do_time_wait;
@@ -2425,12 +2430,12 @@ static struct tcp_seq_afinfo tcp4_seq_afinfo = {
2425 }, 2430 },
2426}; 2431};
2427 2432
2428static int tcp4_proc_init_net(struct net *net) 2433static int __net_init tcp4_proc_init_net(struct net *net)
2429{ 2434{
2430 return tcp_proc_register(net, &tcp4_seq_afinfo); 2435 return tcp_proc_register(net, &tcp4_seq_afinfo);
2431} 2436}
2432 2437
2433static void tcp4_proc_exit_net(struct net *net) 2438static void __net_exit tcp4_proc_exit_net(struct net *net)
2434{ 2439{
2435 tcp_proc_unregister(net, &tcp4_seq_afinfo); 2440 tcp_proc_unregister(net, &tcp4_seq_afinfo);
2436} 2441}
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 383ce237640..4a1605d3f90 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -183,7 +183,8 @@ static inline void tcp_event_ack_sent(struct sock *sk, unsigned int pkts)
183 */ 183 */
184void tcp_select_initial_window(int __space, __u32 mss, 184void tcp_select_initial_window(int __space, __u32 mss,
185 __u32 *rcv_wnd, __u32 *window_clamp, 185 __u32 *rcv_wnd, __u32 *window_clamp,
186 int wscale_ok, __u8 *rcv_wscale) 186 int wscale_ok, __u8 *rcv_wscale,
187 __u32 init_rcv_wnd)
187{ 188{
188 unsigned int space = (__space < 0 ? 0 : __space); 189 unsigned int space = (__space < 0 ? 0 : __space);
189 190
@@ -232,7 +233,13 @@ void tcp_select_initial_window(int __space, __u32 mss,
232 init_cwnd = 2; 233 init_cwnd = 2;
233 else if (mss > 1460) 234 else if (mss > 1460)
234 init_cwnd = 3; 235 init_cwnd = 3;
235 if (*rcv_wnd > init_cwnd * mss) 236 /* when initializing use the value from init_rcv_wnd
237 * rather than the default from above
238 */
239 if (init_rcv_wnd &&
240 (*rcv_wnd > init_rcv_wnd * mss))
241 *rcv_wnd = init_rcv_wnd * mss;
242 else if (*rcv_wnd > init_cwnd * mss)
236 *rcv_wnd = init_cwnd * mss; 243 *rcv_wnd = init_cwnd * mss;
237 } 244 }
238 245
@@ -1794,11 +1801,6 @@ static int tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
1794void __tcp_push_pending_frames(struct sock *sk, unsigned int cur_mss, 1801void __tcp_push_pending_frames(struct sock *sk, unsigned int cur_mss,
1795 int nonagle) 1802 int nonagle)
1796{ 1803{
1797 struct sk_buff *skb = tcp_send_head(sk);
1798
1799 if (!skb)
1800 return;
1801
1802 /* If we are closed, the bytes will have to remain here. 1804 /* If we are closed, the bytes will have to remain here.
1803 * In time closedown will finish, we empty the write queue and 1805 * In time closedown will finish, we empty the write queue and
1804 * all will be happy. 1806 * all will be happy.
@@ -2422,7 +2424,8 @@ struct sk_buff *tcp_make_synack(struct sock *sk, struct dst_entry *dst,
2422 &req->rcv_wnd, 2424 &req->rcv_wnd,
2423 &req->window_clamp, 2425 &req->window_clamp,
2424 ireq->wscale_ok, 2426 ireq->wscale_ok,
2425 &rcv_wscale); 2427 &rcv_wscale,
2428 dst_metric(dst, RTAX_INITRWND));
2426 ireq->rcv_wscale = rcv_wscale; 2429 ireq->rcv_wscale = rcv_wscale;
2427 } 2430 }
2428 2431
@@ -2549,7 +2552,8 @@ static void tcp_connect_init(struct sock *sk)
2549 &tp->rcv_wnd, 2552 &tp->rcv_wnd,
2550 &tp->window_clamp, 2553 &tp->window_clamp,
2551 sysctl_tcp_window_scaling, 2554 sysctl_tcp_window_scaling,
2552 &rcv_wscale); 2555 &rcv_wscale,
2556 dst_metric(dst, RTAX_INITRWND));
2553 2557
2554 tp->rx_opt.rcv_wscale = rcv_wscale; 2558 tp->rx_opt.rcv_wscale = rcv_wscale;
2555 tp->rcv_ssthresh = tp->rcv_wnd; 2559 tp->rcv_ssthresh = tp->rcv_wnd;
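
The tcp_output.c change lets a per-route attribute (RTAX_INITRWND, passed in via dst_metric() by the callers patched above and in the syncookie paths) override the default initial-cwnd-based clamp on the advertised receive window. The selection reduces to a few lines of arithmetic; the standalone function below restates it so it can be tried with concrete numbers. The function name is ours, and the default init_cwnd of 4 with the 1460-byte thresholds follows the kernel code surrounding this hunk, part of which lies outside this excerpt.

/* Standalone restatement of the rcv_wnd clamping in
 * tcp_select_initial_window(); mss and windows are in bytes,
 * init_rcv_wnd is in segments as carried by RTAX_INITRWND. */
#include <stdio.h>

static unsigned int clamp_initial_rcv_wnd(unsigned int rcv_wnd,
					  unsigned int mss,
					  unsigned int init_rcv_wnd)
{
	unsigned int init_cwnd = 4;

	if (mss > 1460 * 3)
		init_cwnd = 2;
	else if (mss > 1460)
		init_cwnd = 3;

	/* New behaviour: a per-route initrwnd wins over the
	 * init_cwnd-based default when it is set. */
	if (init_rcv_wnd && rcv_wnd > init_rcv_wnd * mss)
		return init_rcv_wnd * mss;
	if (rcv_wnd > init_cwnd * mss)
		return init_cwnd * mss;
	return rcv_wnd;
}

int main(void)
{
	/* 64 KB window, 1460-byte MSS, route initrwnd of 10 segments */
	printf("%u\n", clamp_initial_rcv_wnd(65535, 1460, 10)); /* 14600 */
	printf("%u\n", clamp_initial_rcv_wnd(65535, 1460, 0));  /* 5840  */
	return 0;
}
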
diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
index 8816a20c259..de7d1bf9114 100644
--- a/net/ipv4/tcp_timer.c
+++ b/net/ipv4/tcp_timer.c
@@ -474,6 +474,12 @@ static void tcp_synack_timer(struct sock *sk)
474 TCP_TIMEOUT_INIT, TCP_RTO_MAX); 474 TCP_TIMEOUT_INIT, TCP_RTO_MAX);
475} 475}
476 476
477void tcp_syn_ack_timeout(struct sock *sk, struct request_sock *req)
478{
479 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPTIMEOUTS);
480}
481EXPORT_SYMBOL(tcp_syn_ack_timeout);
482
477void tcp_set_keepalive(struct sock *sk, int val) 483void tcp_set_keepalive(struct sock *sk, int val)
478{ 484{
479 if ((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN)) 485 if ((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index f0126fdd7e0..4f7d2122d81 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -2027,12 +2027,12 @@ static struct udp_seq_afinfo udp4_seq_afinfo = {
2027 }, 2027 },
2028}; 2028};
2029 2029
2030static int udp4_proc_init_net(struct net *net) 2030static int __net_init udp4_proc_init_net(struct net *net)
2031{ 2031{
2032 return udp_proc_register(net, &udp4_seq_afinfo); 2032 return udp_proc_register(net, &udp4_seq_afinfo);
2033} 2033}
2034 2034
2035static void udp4_proc_exit_net(struct net *net) 2035static void __net_exit udp4_proc_exit_net(struct net *net)
2036{ 2036{
2037 udp_proc_unregister(net, &udp4_seq_afinfo); 2037 udp_proc_unregister(net, &udp4_seq_afinfo);
2038} 2038}
diff --git a/net/ipv4/udplite.c b/net/ipv4/udplite.c
index 66f79513f4a..6610bf76369 100644
--- a/net/ipv4/udplite.c
+++ b/net/ipv4/udplite.c
@@ -81,12 +81,12 @@ static struct udp_seq_afinfo udplite4_seq_afinfo = {
81 }, 81 },
82}; 82};
83 83
84static int udplite4_proc_init_net(struct net *net) 84static int __net_init udplite4_proc_init_net(struct net *net)
85{ 85{
86 return udp_proc_register(net, &udplite4_seq_afinfo); 86 return udp_proc_register(net, &udplite4_seq_afinfo);
87} 87}
88 88
89static void udplite4_proc_exit_net(struct net *net) 89static void __net_exit udplite4_proc_exit_net(struct net *net)
90{ 90{
91 udp_proc_unregister(net, &udplite4_seq_afinfo); 91 udp_proc_unregister(net, &udplite4_seq_afinfo);
92} 92}
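
A large share of this diff is the addition of __net_init/__net_exit annotations — here on the UDP and UDP-Lite proc hooks, and on dozens of per-namespace init/exit functions above and below. When the kernel is built without runtime namespace support (CONFIG_NET_NS=n) these functions only ever run once, so the annotations let their code be placed in discardable init sections. The usual shape of such code is sketched below with a hypothetical subsystem; none of these names come from this diff.

#include <linux/init.h>
#include <linux/module.h>
#include <net/net_namespace.h>

/* Hypothetical per-namespace setup/teardown; __net_init/__net_exit
 * make these discardable after boot when CONFIG_NET_NS is off. */
static int __net_init example_net_init(struct net *net)
{
	/* allocate per-namespace state, register /proc entries, ... */
	return 0;
}

static void __net_exit example_net_exit(struct net *net)
{
	/* undo example_net_init() for this namespace */
}

static struct pernet_operations example_net_ops = {
	.init = example_net_init,
	.exit = example_net_exit,
};

static int __init example_module_init(void)
{
	return register_pernet_subsys(&example_net_ops);
}

static void __exit example_module_exit(void)
{
	unregister_pernet_subsys(&example_net_ops);
}

module_init(example_module_init);
module_exit(example_module_exit);
MODULE_LICENSE("GPL");
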
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index de7a194a64a..1593289155f 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -3027,14 +3027,14 @@ static const struct file_operations if6_fops = {
3027 .release = seq_release_net, 3027 .release = seq_release_net,
3028}; 3028};
3029 3029
3030static int if6_proc_net_init(struct net *net) 3030static int __net_init if6_proc_net_init(struct net *net)
3031{ 3031{
3032 if (!proc_net_fops_create(net, "if_inet6", S_IRUGO, &if6_fops)) 3032 if (!proc_net_fops_create(net, "if_inet6", S_IRUGO, &if6_fops))
3033 return -ENOMEM; 3033 return -ENOMEM;
3034 return 0; 3034 return 0;
3035} 3035}
3036 3036
3037static void if6_proc_net_exit(struct net *net) 3037static void __net_exit if6_proc_net_exit(struct net *net)
3038{ 3038{
3039 proc_net_remove(net, "if_inet6"); 3039 proc_net_remove(net, "if_inet6");
3040} 3040}
@@ -4418,7 +4418,7 @@ static void addrconf_sysctl_unregister(struct inet6_dev *idev)
4418 4418
4419#endif 4419#endif
4420 4420
4421static int addrconf_init_net(struct net *net) 4421static int __net_init addrconf_init_net(struct net *net)
4422{ 4422{
4423 int err; 4423 int err;
4424 struct ipv6_devconf *all, *dflt; 4424 struct ipv6_devconf *all, *dflt;
@@ -4467,7 +4467,7 @@ err_alloc_all:
4467 return err; 4467 return err;
4468} 4468}
4469 4469
4470static void addrconf_exit_net(struct net *net) 4470static void __net_exit addrconf_exit_net(struct net *net)
4471{ 4471{
4472#ifdef CONFIG_SYSCTL 4472#ifdef CONFIG_SYSCTL
4473 __addrconf_sysctl_unregister(net->ipv6.devconf_dflt); 4473 __addrconf_sysctl_unregister(net->ipv6.devconf_dflt);
diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c
index 12e69d364dd..e29160ff4a0 100644
--- a/net/ipv6/af_inet6.c
+++ b/net/ipv6/af_inet6.c
@@ -999,7 +999,7 @@ err_udplite_mib:
999 return -ENOMEM; 999 return -ENOMEM;
1000} 1000}
1001 1001
1002static void __net_exit ipv6_cleanup_mibs(struct net *net) 1002static void ipv6_cleanup_mibs(struct net *net)
1003{ 1003{
1004 snmp_mib_free((void **)net->mib.udp_stats_in6); 1004 snmp_mib_free((void **)net->mib.udp_stats_in6);
1005 snmp_mib_free((void **)net->mib.udplite_stats_in6); 1005 snmp_mib_free((void **)net->mib.udplite_stats_in6);
@@ -1042,7 +1042,7 @@ out:
1042#endif 1042#endif
1043} 1043}
1044 1044
1045static void inet6_net_exit(struct net *net) 1045static void __net_exit inet6_net_exit(struct net *net)
1046{ 1046{
1047#ifdef CONFIG_PROC_FS 1047#ifdef CONFIG_PROC_FS
1048 udp6_proc_exit(net); 1048 udp6_proc_exit(net);
diff --git a/net/ipv6/anycast.c b/net/ipv6/anycast.c
index f1c74c8ef9d..c4f6ca32fa7 100644
--- a/net/ipv6/anycast.c
+++ b/net/ipv6/anycast.c
@@ -538,7 +538,7 @@ static const struct file_operations ac6_seq_fops = {
538 .release = seq_release_net, 538 .release = seq_release_net,
539}; 539};
540 540
541int ac6_proc_init(struct net *net) 541int __net_init ac6_proc_init(struct net *net)
542{ 542{
543 if (!proc_net_fops_create(net, "anycast6", S_IRUGO, &ac6_seq_fops)) 543 if (!proc_net_fops_create(net, "anycast6", S_IRUGO, &ac6_seq_fops))
544 return -ENOMEM; 544 return -ENOMEM;
diff --git a/net/ipv6/fib6_rules.c b/net/ipv6/fib6_rules.c
index b7aa7c64cc4..551882b9dfd 100644
--- a/net/ipv6/fib6_rules.c
+++ b/net/ipv6/fib6_rules.c
@@ -262,7 +262,7 @@ static struct fib_rules_ops fib6_rules_ops_template = {
262 .fro_net = &init_net, 262 .fro_net = &init_net,
263}; 263};
264 264
265static int fib6_rules_net_init(struct net *net) 265static int __net_init fib6_rules_net_init(struct net *net)
266{ 266{
267 struct fib_rules_ops *ops; 267 struct fib_rules_ops *ops;
268 int err = -ENOMEM; 268 int err = -ENOMEM;
@@ -291,7 +291,7 @@ out_fib6_rules_ops:
291 goto out; 291 goto out;
292} 292}
293 293
294static void fib6_rules_net_exit(struct net *net) 294static void __net_exit fib6_rules_net_exit(struct net *net)
295{ 295{
296 fib_rules_unregister(net->ipv6.fib6_rules_ops); 296 fib_rules_unregister(net->ipv6.fib6_rules_ops);
297} 297}
diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c
index 4ae661bc367..217dbc2e28d 100644
--- a/net/ipv6/icmp.c
+++ b/net/ipv6/icmp.c
@@ -951,7 +951,7 @@ ctl_table ipv6_icmp_table_template[] = {
951 { }, 951 { },
952}; 952};
953 953
954struct ctl_table *ipv6_icmp_sysctl_init(struct net *net) 954struct ctl_table * __net_init ipv6_icmp_sysctl_init(struct net *net)
955{ 955{
956 struct ctl_table *table; 956 struct ctl_table *table;
957 957
diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
index 0e93ca56eb6..f626ea2b304 100644
--- a/net/ipv6/ip6_fib.c
+++ b/net/ipv6/ip6_fib.c
@@ -239,7 +239,7 @@ struct fib6_table *fib6_get_table(struct net *net, u32 id)
239 return NULL; 239 return NULL;
240} 240}
241 241
242static void fib6_tables_init(struct net *net) 242static void __net_init fib6_tables_init(struct net *net)
243{ 243{
244 fib6_link_table(net, net->ipv6.fib6_main_tbl); 244 fib6_link_table(net, net->ipv6.fib6_main_tbl);
245 fib6_link_table(net, net->ipv6.fib6_local_tbl); 245 fib6_link_table(net, net->ipv6.fib6_local_tbl);
@@ -262,7 +262,7 @@ struct dst_entry *fib6_rule_lookup(struct net *net, struct flowi *fl,
262 return (struct dst_entry *) lookup(net, net->ipv6.fib6_main_tbl, fl, flags); 262 return (struct dst_entry *) lookup(net, net->ipv6.fib6_main_tbl, fl, flags);
263} 263}
264 264
265static void fib6_tables_init(struct net *net) 265static void __net_init fib6_tables_init(struct net *net)
266{ 266{
267 fib6_link_table(net, net->ipv6.fib6_main_tbl); 267 fib6_link_table(net, net->ipv6.fib6_main_tbl);
268} 268}
@@ -1469,7 +1469,7 @@ static void fib6_gc_timer_cb(unsigned long arg)
1469 fib6_run_gc(0, (struct net *)arg); 1469 fib6_run_gc(0, (struct net *)arg);
1470} 1470}
1471 1471
1472static int fib6_net_init(struct net *net) 1472static int __net_init fib6_net_init(struct net *net)
1473{ 1473{
1474 setup_timer(&net->ipv6.ip6_fib_timer, fib6_gc_timer_cb, (unsigned long)net); 1474 setup_timer(&net->ipv6.ip6_fib_timer, fib6_gc_timer_cb, (unsigned long)net);
1475 1475
diff --git a/net/ipv6/ip6_flowlabel.c b/net/ipv6/ip6_flowlabel.c
index 6e7bffa2205..e41eba8aacf 100644
--- a/net/ipv6/ip6_flowlabel.c
+++ b/net/ipv6/ip6_flowlabel.c
@@ -154,7 +154,7 @@ static void ip6_fl_gc(unsigned long dummy)
154 write_unlock(&ip6_fl_lock); 154 write_unlock(&ip6_fl_lock);
155} 155}
156 156
157static void ip6_fl_purge(struct net *net) 157static void __net_exit ip6_fl_purge(struct net *net)
158{ 158{
159 int i; 159 int i;
160 160
@@ -735,7 +735,7 @@ static const struct file_operations ip6fl_seq_fops = {
735 .release = seq_release_net, 735 .release = seq_release_net,
736}; 736};
737 737
738static int ip6_flowlabel_proc_init(struct net *net) 738static int __net_init ip6_flowlabel_proc_init(struct net *net)
739{ 739{
740 if (!proc_net_fops_create(net, "ip6_flowlabel", 740 if (!proc_net_fops_create(net, "ip6_flowlabel",
741 S_IRUGO, &ip6fl_seq_fops)) 741 S_IRUGO, &ip6fl_seq_fops))
@@ -743,7 +743,7 @@ static int ip6_flowlabel_proc_init(struct net *net)
743 return 0; 743 return 0;
744} 744}
745 745
746static void ip6_flowlabel_proc_fini(struct net *net) 746static void __net_exit ip6_flowlabel_proc_fini(struct net *net)
747{ 747{
748 proc_net_remove(net, "ip6_flowlabel"); 748 proc_net_remove(net, "ip6_flowlabel");
749} 749}
@@ -754,11 +754,10 @@ static inline int ip6_flowlabel_proc_init(struct net *net)
754} 754}
755static inline void ip6_flowlabel_proc_fini(struct net *net) 755static inline void ip6_flowlabel_proc_fini(struct net *net)
756{ 756{
757 return ;
758} 757}
759#endif 758#endif
760 759
761static inline void ip6_flowlabel_net_exit(struct net *net) 760static void __net_exit ip6_flowlabel_net_exit(struct net *net)
762{ 761{
763 ip6_fl_purge(net); 762 ip6_fl_purge(net);
764 ip6_flowlabel_proc_fini(net); 763 ip6_flowlabel_proc_fini(net);
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
index d453d07b0df..fbd786981aa 100644
--- a/net/ipv6/ip6_tunnel.c
+++ b/net/ipv6/ip6_tunnel.c
@@ -74,7 +74,6 @@ MODULE_LICENSE("GPL");
74 (addr)->s6_addr32[2] ^ (addr)->s6_addr32[3]) & \ 74 (addr)->s6_addr32[2] ^ (addr)->s6_addr32[3]) & \
75 (HASH_SIZE - 1)) 75 (HASH_SIZE - 1))
76 76
77static void ip6_fb_tnl_dev_init(struct net_device *dev);
78static void ip6_tnl_dev_init(struct net_device *dev); 77static void ip6_tnl_dev_init(struct net_device *dev);
79static void ip6_tnl_dev_setup(struct net_device *dev); 78static void ip6_tnl_dev_setup(struct net_device *dev);
80 79
@@ -1364,7 +1363,7 @@ static void ip6_tnl_dev_init(struct net_device *dev)
1364 * Return: 0 1363 * Return: 0
1365 **/ 1364 **/
1366 1365
1367static void ip6_fb_tnl_dev_init(struct net_device *dev) 1366static void __net_init ip6_fb_tnl_dev_init(struct net_device *dev)
1368{ 1367{
1369 struct ip6_tnl *t = netdev_priv(dev); 1368 struct ip6_tnl *t = netdev_priv(dev);
1370 struct net *net = dev_net(dev); 1369 struct net *net = dev_net(dev);
@@ -1388,7 +1387,7 @@ static struct xfrm6_tunnel ip6ip6_handler = {
1388 .priority = 1, 1387 .priority = 1,
1389}; 1388};
1390 1389
1391static void ip6_tnl_destroy_tunnels(struct ip6_tnl_net *ip6n) 1390static void __net_exit ip6_tnl_destroy_tunnels(struct ip6_tnl_net *ip6n)
1392{ 1391{
1393 int h; 1392 int h;
1394 struct ip6_tnl *t; 1393 struct ip6_tnl *t;
@@ -1407,7 +1406,7 @@ static void ip6_tnl_destroy_tunnels(struct ip6_tnl_net *ip6n)
1407 unregister_netdevice_many(&list); 1406 unregister_netdevice_many(&list);
1408} 1407}
1409 1408
1410static int ip6_tnl_init_net(struct net *net) 1409static int __net_init ip6_tnl_init_net(struct net *net)
1411{ 1410{
1412 struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id); 1411 struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
1413 int err; 1412 int err;
@@ -1436,7 +1435,7 @@ err_alloc_dev:
1436 return err; 1435 return err;
1437} 1436}
1438 1437
1439static void ip6_tnl_exit_net(struct net *net) 1438static void __net_exit ip6_tnl_exit_net(struct net *net)
1440{ 1439{
1441 struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id); 1440 struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
1442 1441
diff --git a/net/ipv6/ipcomp6.c b/net/ipv6/ipcomp6.c
index 2f2a5ca2c87..a9fbb151bb7 100644
--- a/net/ipv6/ipcomp6.c
+++ b/net/ipv6/ipcomp6.c
@@ -53,6 +53,7 @@
53static void ipcomp6_err(struct sk_buff *skb, struct inet6_skb_parm *opt, 53static void ipcomp6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
54 u8 type, u8 code, int offset, __be32 info) 54 u8 type, u8 code, int offset, __be32 info)
55{ 55{
56 struct net *net = dev_net(skb->dev);
56 __be32 spi; 57 __be32 spi;
57 struct ipv6hdr *iph = (struct ipv6hdr*)skb->data; 58 struct ipv6hdr *iph = (struct ipv6hdr*)skb->data;
58 struct ip_comp_hdr *ipcomph = 59 struct ip_comp_hdr *ipcomph =
@@ -63,7 +64,7 @@ static void ipcomp6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
63 return; 64 return;
64 65
65 spi = htonl(ntohs(ipcomph->cpi)); 66 spi = htonl(ntohs(ipcomph->cpi));
66 x = xfrm_state_lookup(&init_net, (xfrm_address_t *)&iph->daddr, spi, IPPROTO_COMP, AF_INET6); 67 x = xfrm_state_lookup(net, (xfrm_address_t *)&iph->daddr, spi, IPPROTO_COMP, AF_INET6);
67 if (!x) 68 if (!x)
68 return; 69 return;
69 70
@@ -74,14 +75,15 @@ static void ipcomp6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
74 75
75static struct xfrm_state *ipcomp6_tunnel_create(struct xfrm_state *x) 76static struct xfrm_state *ipcomp6_tunnel_create(struct xfrm_state *x)
76{ 77{
78 struct net *net = xs_net(x);
77 struct xfrm_state *t = NULL; 79 struct xfrm_state *t = NULL;
78 80
79 t = xfrm_state_alloc(&init_net); 81 t = xfrm_state_alloc(net);
80 if (!t) 82 if (!t)
81 goto out; 83 goto out;
82 84
83 t->id.proto = IPPROTO_IPV6; 85 t->id.proto = IPPROTO_IPV6;
84 t->id.spi = xfrm6_tunnel_alloc_spi((xfrm_address_t *)&x->props.saddr); 86 t->id.spi = xfrm6_tunnel_alloc_spi(net, (xfrm_address_t *)&x->props.saddr);
85 if (!t->id.spi) 87 if (!t->id.spi)
86 goto error; 88 goto error;
87 89
@@ -108,13 +110,14 @@ error:
108 110
109static int ipcomp6_tunnel_attach(struct xfrm_state *x) 111static int ipcomp6_tunnel_attach(struct xfrm_state *x)
110{ 112{
113 struct net *net = xs_net(x);
111 int err = 0; 114 int err = 0;
112 struct xfrm_state *t = NULL; 115 struct xfrm_state *t = NULL;
113 __be32 spi; 116 __be32 spi;
114 117
115 spi = xfrm6_tunnel_spi_lookup((xfrm_address_t *)&x->props.saddr); 118 spi = xfrm6_tunnel_spi_lookup(net, (xfrm_address_t *)&x->props.saddr);
116 if (spi) 119 if (spi)
117 t = xfrm_state_lookup(&init_net, (xfrm_address_t *)&x->id.daddr, 120 t = xfrm_state_lookup(net, (xfrm_address_t *)&x->id.daddr,
118 spi, IPPROTO_IPV6, AF_INET6); 121 spi, IPPROTO_IPV6, AF_INET6);
119 if (!t) { 122 if (!t) {
120 t = ipcomp6_tunnel_create(x); 123 t = ipcomp6_tunnel_create(x);
diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c
index 1f9c44442e6..25f6cca79e6 100644
--- a/net/ipv6/mcast.c
+++ b/net/ipv6/mcast.c
@@ -2646,7 +2646,7 @@ static const struct file_operations igmp6_mcf_seq_fops = {
2646 .release = seq_release_net, 2646 .release = seq_release_net,
2647}; 2647};
2648 2648
2649static int igmp6_proc_init(struct net *net) 2649static int __net_init igmp6_proc_init(struct net *net)
2650{ 2650{
2651 int err; 2651 int err;
2652 2652
@@ -2666,23 +2666,22 @@ out_proc_net_igmp6:
2666 goto out; 2666 goto out;
2667} 2667}
2668 2668
2669static void igmp6_proc_exit(struct net *net) 2669static void __net_exit igmp6_proc_exit(struct net *net)
2670{ 2670{
2671 proc_net_remove(net, "mcfilter6"); 2671 proc_net_remove(net, "mcfilter6");
2672 proc_net_remove(net, "igmp6"); 2672 proc_net_remove(net, "igmp6");
2673} 2673}
2674#else 2674#else
2675static int igmp6_proc_init(struct net *net) 2675static inline int igmp6_proc_init(struct net *net)
2676{ 2676{
2677 return 0; 2677 return 0;
2678} 2678}
2679static void igmp6_proc_exit(struct net *net) 2679static inline void igmp6_proc_exit(struct net *net)
2680{ 2680{
2681 ;
2682} 2681}
2683#endif 2682#endif
2684 2683
2685static int igmp6_net_init(struct net *net) 2684static int __net_init igmp6_net_init(struct net *net)
2686{ 2685{
2687 int err; 2686 int err;
2688 2687
@@ -2708,7 +2707,7 @@ out_sock_create:
2708 goto out; 2707 goto out;
2709} 2708}
2710 2709
2711static void igmp6_net_exit(struct net *net) 2710static void __net_exit igmp6_net_exit(struct net *net)
2712{ 2711{
2713 inet_ctl_sock_destroy(net->ipv6.igmp_sk); 2712 inet_ctl_sock_destroy(net->ipv6.igmp_sk);
2714 igmp6_proc_exit(net); 2713 igmp6_proc_exit(net);
diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c
index c4585279809..2dfec6bb8ad 100644
--- a/net/ipv6/ndisc.c
+++ b/net/ipv6/ndisc.c
@@ -1772,7 +1772,7 @@ int ndisc_ifinfo_sysctl_change(struct ctl_table *ctl, int write, void __user *bu
1772 1772
1773#endif 1773#endif
1774 1774
1775static int ndisc_net_init(struct net *net) 1775static int __net_init ndisc_net_init(struct net *net)
1776{ 1776{
1777 struct ipv6_pinfo *np; 1777 struct ipv6_pinfo *np;
1778 struct sock *sk; 1778 struct sock *sk;
@@ -1797,7 +1797,7 @@ static int ndisc_net_init(struct net *net)
1797 return 0; 1797 return 0;
1798} 1798}
1799 1799
1800static void ndisc_net_exit(struct net *net) 1800static void __net_exit ndisc_net_exit(struct net *net)
1801{ 1801{
1802 inet_ctl_sock_destroy(net->ipv6.ndisc_sk); 1802 inet_ctl_sock_destroy(net->ipv6.ndisc_sk);
1803} 1803}
diff --git a/net/ipv6/proc.c b/net/ipv6/proc.c
index c9605c3ad91..bfe2598dd56 100644
--- a/net/ipv6/proc.c
+++ b/net/ipv6/proc.c
@@ -59,7 +59,7 @@ static const struct file_operations sockstat6_seq_fops = {
59 .release = single_release_net, 59 .release = single_release_net,
60}; 60};
61 61
62static struct snmp_mib snmp6_ipstats_list[] = { 62static const struct snmp_mib snmp6_ipstats_list[] = {
63/* ipv6 mib according to RFC 2465 */ 63/* ipv6 mib according to RFC 2465 */
64 SNMP_MIB_ITEM("Ip6InReceives", IPSTATS_MIB_INPKTS), 64 SNMP_MIB_ITEM("Ip6InReceives", IPSTATS_MIB_INPKTS),
65 SNMP_MIB_ITEM("Ip6InHdrErrors", IPSTATS_MIB_INHDRERRORS), 65 SNMP_MIB_ITEM("Ip6InHdrErrors", IPSTATS_MIB_INHDRERRORS),
@@ -92,7 +92,7 @@ static struct snmp_mib snmp6_ipstats_list[] = {
92 SNMP_MIB_SENTINEL 92 SNMP_MIB_SENTINEL
93}; 93};
94 94
95static struct snmp_mib snmp6_icmp6_list[] = { 95static const struct snmp_mib snmp6_icmp6_list[] = {
96/* icmpv6 mib according to RFC 2466 */ 96/* icmpv6 mib according to RFC 2466 */
97 SNMP_MIB_ITEM("Icmp6InMsgs", ICMP6_MIB_INMSGS), 97 SNMP_MIB_ITEM("Icmp6InMsgs", ICMP6_MIB_INMSGS),
98 SNMP_MIB_ITEM("Icmp6InErrors", ICMP6_MIB_INERRORS), 98 SNMP_MIB_ITEM("Icmp6InErrors", ICMP6_MIB_INERRORS),
@@ -120,7 +120,7 @@ static const char *const icmp6type2name[256] = {
120}; 120};
121 121
122 122
123static struct snmp_mib snmp6_udp6_list[] = { 123static const struct snmp_mib snmp6_udp6_list[] = {
124 SNMP_MIB_ITEM("Udp6InDatagrams", UDP_MIB_INDATAGRAMS), 124 SNMP_MIB_ITEM("Udp6InDatagrams", UDP_MIB_INDATAGRAMS),
125 SNMP_MIB_ITEM("Udp6NoPorts", UDP_MIB_NOPORTS), 125 SNMP_MIB_ITEM("Udp6NoPorts", UDP_MIB_NOPORTS),
126 SNMP_MIB_ITEM("Udp6InErrors", UDP_MIB_INERRORS), 126 SNMP_MIB_ITEM("Udp6InErrors", UDP_MIB_INERRORS),
@@ -128,7 +128,7 @@ static struct snmp_mib snmp6_udp6_list[] = {
128 SNMP_MIB_SENTINEL 128 SNMP_MIB_SENTINEL
129}; 129};
130 130
131static struct snmp_mib snmp6_udplite6_list[] = { 131static const struct snmp_mib snmp6_udplite6_list[] = {
132 SNMP_MIB_ITEM("UdpLite6InDatagrams", UDP_MIB_INDATAGRAMS), 132 SNMP_MIB_ITEM("UdpLite6InDatagrams", UDP_MIB_INDATAGRAMS),
133 SNMP_MIB_ITEM("UdpLite6NoPorts", UDP_MIB_NOPORTS), 133 SNMP_MIB_ITEM("UdpLite6NoPorts", UDP_MIB_NOPORTS),
134 SNMP_MIB_ITEM("UdpLite6InErrors", UDP_MIB_INERRORS), 134 SNMP_MIB_ITEM("UdpLite6InErrors", UDP_MIB_INERRORS),
@@ -170,8 +170,8 @@ static void snmp6_seq_show_icmpv6msg(struct seq_file *seq, void **mib)
170 return; 170 return;
171} 171}
172 172
173static inline void 173static void snmp6_seq_show_item(struct seq_file *seq, void **mib,
174snmp6_seq_show_item(struct seq_file *seq, void **mib, struct snmp_mib *itemlist) 174 const struct snmp_mib *itemlist)
175{ 175{
176 int i; 176 int i;
177 for (i=0; itemlist[i].name; i++) 177 for (i=0; itemlist[i].name; i++)
@@ -259,7 +259,7 @@ int snmp6_unregister_dev(struct inet6_dev *idev)
259 struct net *net = dev_net(idev->dev); 259 struct net *net = dev_net(idev->dev);
260 if (!net->mib.proc_net_devsnmp6) 260 if (!net->mib.proc_net_devsnmp6)
261 return -ENOENT; 261 return -ENOENT;
262 if (!idev || !idev->stats.proc_dir_entry) 262 if (!idev->stats.proc_dir_entry)
263 return -EINVAL; 263 return -EINVAL;
264 remove_proc_entry(idev->stats.proc_dir_entry->name, 264 remove_proc_entry(idev->stats.proc_dir_entry->name,
265 net->mib.proc_net_devsnmp6); 265 net->mib.proc_net_devsnmp6);
@@ -267,7 +267,7 @@ int snmp6_unregister_dev(struct inet6_dev *idev)
267 return 0; 267 return 0;
268} 268}
269 269
270static int ipv6_proc_init_net(struct net *net) 270static int __net_init ipv6_proc_init_net(struct net *net)
271{ 271{
272 if (!proc_net_fops_create(net, "sockstat6", S_IRUGO, 272 if (!proc_net_fops_create(net, "sockstat6", S_IRUGO,
273 &sockstat6_seq_fops)) 273 &sockstat6_seq_fops))
@@ -288,7 +288,7 @@ proc_dev_snmp6_fail:
288 return -ENOMEM; 288 return -ENOMEM;
289} 289}
290 290
291static void ipv6_proc_exit_net(struct net *net) 291static void __net_exit ipv6_proc_exit_net(struct net *net)
292{ 292{
293 proc_net_remove(net, "sockstat6"); 293 proc_net_remove(net, "sockstat6");
294 proc_net_remove(net, "dev_snmp6"); 294 proc_net_remove(net, "dev_snmp6");
diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
index 926ce8eeffa..ed31c37c6e3 100644
--- a/net/ipv6/raw.c
+++ b/net/ipv6/raw.c
@@ -1275,7 +1275,7 @@ static const struct file_operations raw6_seq_fops = {
1275 .release = seq_release_net, 1275 .release = seq_release_net,
1276}; 1276};
1277 1277
1278static int raw6_init_net(struct net *net) 1278static int __net_init raw6_init_net(struct net *net)
1279{ 1279{
1280 if (!proc_net_fops_create(net, "raw6", S_IRUGO, &raw6_seq_fops)) 1280 if (!proc_net_fops_create(net, "raw6", S_IRUGO, &raw6_seq_fops))
1281 return -ENOMEM; 1281 return -ENOMEM;
@@ -1283,7 +1283,7 @@ static int raw6_init_net(struct net *net)
1283 return 0; 1283 return 0;
1284} 1284}
1285 1285
1286static void raw6_exit_net(struct net *net) 1286static void __net_exit raw6_exit_net(struct net *net)
1287{ 1287{
1288 proc_net_remove(net, "raw6"); 1288 proc_net_remove(net, "raw6");
1289} 1289}
diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c
index 2cddea3bd6b..fa38fc7cc6e 100644
--- a/net/ipv6/reassembly.c
+++ b/net/ipv6/reassembly.c
@@ -672,7 +672,7 @@ static struct ctl_table ip6_frags_ctl_table[] = {
672 { } 672 { }
673}; 673};
674 674
675static int ip6_frags_ns_sysctl_register(struct net *net) 675static int __net_init ip6_frags_ns_sysctl_register(struct net *net)
676{ 676{
677 struct ctl_table *table; 677 struct ctl_table *table;
678 struct ctl_table_header *hdr; 678 struct ctl_table_header *hdr;
@@ -702,7 +702,7 @@ err_alloc:
702 return -ENOMEM; 702 return -ENOMEM;
703} 703}
704 704
705static void ip6_frags_ns_sysctl_unregister(struct net *net) 705static void __net_exit ip6_frags_ns_sysctl_unregister(struct net *net)
706{ 706{
707 struct ctl_table *table; 707 struct ctl_table *table;
708 708
@@ -745,7 +745,7 @@ static inline void ip6_frags_sysctl_unregister(void)
745} 745}
746#endif 746#endif
747 747
748static int ipv6_frags_init_net(struct net *net) 748static int __net_init ipv6_frags_init_net(struct net *net)
749{ 749{
750 net->ipv6.frags.high_thresh = 256 * 1024; 750 net->ipv6.frags.high_thresh = 256 * 1024;
751 net->ipv6.frags.low_thresh = 192 * 1024; 751 net->ipv6.frags.low_thresh = 192 * 1024;
@@ -756,7 +756,7 @@ static int ipv6_frags_init_net(struct net *net)
756 return ip6_frags_ns_sysctl_register(net); 756 return ip6_frags_ns_sysctl_register(net);
757} 757}
758 758
759static void ipv6_frags_exit_net(struct net *net) 759static void __net_exit ipv6_frags_exit_net(struct net *net)
760{ 760{
761 ip6_frags_ns_sysctl_unregister(net); 761 ip6_frags_ns_sysctl_unregister(net);
762 inet_frags_exit_net(&net->ipv6.frags, &ip6_frags); 762 inet_frags_exit_net(&net->ipv6.frags, &ip6_frags);
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index c2bd74c5f8d..8500156f263 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -2612,7 +2612,7 @@ ctl_table ipv6_route_table_template[] = {
2612 { } 2612 { }
2613}; 2613};
2614 2614
2615struct ctl_table *ipv6_route_sysctl_init(struct net *net) 2615struct ctl_table * __net_init ipv6_route_sysctl_init(struct net *net)
2616{ 2616{
2617 struct ctl_table *table; 2617 struct ctl_table *table;
2618 2618
@@ -2637,7 +2637,7 @@ struct ctl_table *ipv6_route_sysctl_init(struct net *net)
2637} 2637}
2638#endif 2638#endif
2639 2639
2640static int ip6_route_net_init(struct net *net) 2640static int __net_init ip6_route_net_init(struct net *net)
2641{ 2641{
2642 int ret = -ENOMEM; 2642 int ret = -ENOMEM;
2643 2643
@@ -2702,7 +2702,7 @@ out_ip6_dst_ops:
2702 goto out; 2702 goto out;
2703} 2703}
2704 2704
2705static void ip6_route_net_exit(struct net *net) 2705static void __net_exit ip6_route_net_exit(struct net *net)
2706{ 2706{
2707#ifdef CONFIG_PROC_FS 2707#ifdef CONFIG_PROC_FS
2708 proc_net_remove(net, "ipv6_route"); 2708 proc_net_remove(net, "ipv6_route");
diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
index 976e68244b9..10207cc8cc0 100644
--- a/net/ipv6/sit.c
+++ b/net/ipv6/sit.c
@@ -62,7 +62,6 @@
62#define HASH_SIZE 16 62#define HASH_SIZE 16
63#define HASH(addr) (((__force u32)addr^((__force u32)addr>>4))&0xF) 63#define HASH(addr) (((__force u32)addr^((__force u32)addr>>4))&0xF)
64 64
65static void ipip6_fb_tunnel_init(struct net_device *dev);
66static void ipip6_tunnel_init(struct net_device *dev); 65static void ipip6_tunnel_init(struct net_device *dev);
67static void ipip6_tunnel_setup(struct net_device *dev); 66static void ipip6_tunnel_setup(struct net_device *dev);
68 67
@@ -1120,7 +1119,7 @@ static void ipip6_tunnel_init(struct net_device *dev)
1120 ipip6_tunnel_bind_dev(dev); 1119 ipip6_tunnel_bind_dev(dev);
1121} 1120}
1122 1121
1123static void ipip6_fb_tunnel_init(struct net_device *dev) 1122static void __net_init ipip6_fb_tunnel_init(struct net_device *dev)
1124{ 1123{
1125 struct ip_tunnel *tunnel = netdev_priv(dev); 1124 struct ip_tunnel *tunnel = netdev_priv(dev);
1126 struct iphdr *iph = &tunnel->parms.iph; 1125 struct iphdr *iph = &tunnel->parms.iph;
@@ -1145,7 +1144,7 @@ static struct xfrm_tunnel sit_handler = {
1145 .priority = 1, 1144 .priority = 1,
1146}; 1145};
1147 1146
1148static void sit_destroy_tunnels(struct sit_net *sitn, struct list_head *head) 1147static void __net_exit sit_destroy_tunnels(struct sit_net *sitn, struct list_head *head)
1149{ 1148{
1150 int prio; 1149 int prio;
1151 1150
@@ -1162,7 +1161,7 @@ static void sit_destroy_tunnels(struct sit_net *sitn, struct list_head *head)
1162 } 1161 }
1163} 1162}
1164 1163
1165static int sit_init_net(struct net *net) 1164static int __net_init sit_init_net(struct net *net)
1166{ 1165{
1167 struct sit_net *sitn = net_generic(net, sit_net_id); 1166 struct sit_net *sitn = net_generic(net, sit_net_id);
1168 int err; 1167 int err;
@@ -1195,7 +1194,7 @@ err_alloc_dev:
1195 return err; 1194 return err;
1196} 1195}
1197 1196
1198static void sit_exit_net(struct net *net) 1197static void __net_exit sit_exit_net(struct net *net)
1199{ 1198{
1200 struct sit_net *sitn = net_generic(net, sit_net_id); 1199 struct sit_net *sitn = net_generic(net, sit_net_id);
1201 LIST_HEAD(list); 1200 LIST_HEAD(list);
diff --git a/net/ipv6/syncookies.c b/net/ipv6/syncookies.c
index 7208a06576c..34d1f0690d7 100644
--- a/net/ipv6/syncookies.c
+++ b/net/ipv6/syncookies.c
@@ -269,7 +269,8 @@ struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb)
269 req->window_clamp = tp->window_clamp ? :dst_metric(dst, RTAX_WINDOW); 269 req->window_clamp = tp->window_clamp ? :dst_metric(dst, RTAX_WINDOW);
270 tcp_select_initial_window(tcp_full_space(sk), req->mss, 270 tcp_select_initial_window(tcp_full_space(sk), req->mss,
271 &req->rcv_wnd, &req->window_clamp, 271 &req->rcv_wnd, &req->window_clamp,
272 ireq->wscale_ok, &rcv_wscale); 272 ireq->wscale_ok, &rcv_wscale,
273 dst_metric(dst, RTAX_INITRWND));
273 274
274 ireq->rcv_wscale = rcv_wscale; 275 ireq->rcv_wscale = rcv_wscale;
275 276
diff --git a/net/ipv6/sysctl_net_ipv6.c b/net/ipv6/sysctl_net_ipv6.c
index c690736885b..f841d93bf98 100644
--- a/net/ipv6/sysctl_net_ipv6.c
+++ b/net/ipv6/sysctl_net_ipv6.c
@@ -55,7 +55,7 @@ struct ctl_path net_ipv6_ctl_path[] = {
55}; 55};
56EXPORT_SYMBOL_GPL(net_ipv6_ctl_path); 56EXPORT_SYMBOL_GPL(net_ipv6_ctl_path);
57 57
58static int ipv6_sysctl_net_init(struct net *net) 58static int __net_init ipv6_sysctl_net_init(struct net *net)
59{ 59{
60 struct ctl_table *ipv6_table; 60 struct ctl_table *ipv6_table;
61 struct ctl_table *ipv6_route_table; 61 struct ctl_table *ipv6_route_table;
@@ -98,7 +98,7 @@ out_ipv6_table:
98 goto out; 98 goto out;
99} 99}
100 100
101static void ipv6_sysctl_net_exit(struct net *net) 101static void __net_exit ipv6_sysctl_net_exit(struct net *net)
102{ 102{
103 struct ctl_table *ipv6_table; 103 struct ctl_table *ipv6_table;
104 struct ctl_table *ipv6_route_table; 104 struct ctl_table *ipv6_route_table;
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index febfd595a40..6963a6b6763 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -520,6 +520,13 @@ done:
520 return err; 520 return err;
521} 521}
522 522
523static int tcp_v6_rtx_synack(struct sock *sk, struct request_sock *req,
524 struct request_values *rvp)
525{
526 TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_RETRANSSEGS);
527 return tcp_v6_send_synack(sk, req, rvp);
528}
529
523static inline void syn_flood_warning(struct sk_buff *skb) 530static inline void syn_flood_warning(struct sk_buff *skb)
524{ 531{
525#ifdef CONFIG_SYN_COOKIES 532#ifdef CONFIG_SYN_COOKIES
@@ -876,7 +883,7 @@ static int tcp_v6_inbound_md5_hash (struct sock *sk, struct sk_buff *skb)
876 883
877 if (genhash || memcmp(hash_location, newhash, 16) != 0) { 884 if (genhash || memcmp(hash_location, newhash, 16) != 0) {
878 if (net_ratelimit()) { 885 if (net_ratelimit()) {
879 printk(KERN_INFO "MD5 Hash %s for (%pI6, %u)->(%pI6, %u)\n", 886 printk(KERN_INFO "MD5 Hash %s for [%pI6c]:%u->[%pI6c]:%u\n",
880 genhash ? "failed" : "mismatch", 887 genhash ? "failed" : "mismatch",
881 &ip6h->saddr, ntohs(th->source), 888 &ip6h->saddr, ntohs(th->source),
882 &ip6h->daddr, ntohs(th->dest)); 889 &ip6h->daddr, ntohs(th->dest));
@@ -890,10 +897,11 @@ static int tcp_v6_inbound_md5_hash (struct sock *sk, struct sk_buff *skb)
890struct request_sock_ops tcp6_request_sock_ops __read_mostly = { 897struct request_sock_ops tcp6_request_sock_ops __read_mostly = {
891 .family = AF_INET6, 898 .family = AF_INET6,
892 .obj_size = sizeof(struct tcp6_request_sock), 899 .obj_size = sizeof(struct tcp6_request_sock),
893 .rtx_syn_ack = tcp_v6_send_synack, 900 .rtx_syn_ack = tcp_v6_rtx_synack,
894 .send_ack = tcp_v6_reqsk_send_ack, 901 .send_ack = tcp_v6_reqsk_send_ack,
895 .destructor = tcp_v6_reqsk_destructor, 902 .destructor = tcp_v6_reqsk_destructor,
896 .send_reset = tcp_v6_send_reset 903 .send_reset = tcp_v6_send_reset,
904 .syn_ack_timeout = tcp_syn_ack_timeout,
897}; 905};
898 906
899#ifdef CONFIG_TCP_MD5SIG 907#ifdef CONFIG_TCP_MD5SIG
@@ -2105,7 +2113,7 @@ static struct tcp_seq_afinfo tcp6_seq_afinfo = {
2105 }, 2113 },
2106}; 2114};
2107 2115
2108int tcp6_proc_init(struct net *net) 2116int __net_init tcp6_proc_init(struct net *net)
2109{ 2117{
2110 return tcp_proc_register(net, &tcp6_seq_afinfo); 2118 return tcp_proc_register(net, &tcp6_seq_afinfo);
2111} 2119}
@@ -2174,18 +2182,18 @@ static struct inet_protosw tcpv6_protosw = {
2174 INET_PROTOSW_ICSK, 2182 INET_PROTOSW_ICSK,
2175}; 2183};
2176 2184
2177static int tcpv6_net_init(struct net *net) 2185static int __net_init tcpv6_net_init(struct net *net)
2178{ 2186{
2179 return inet_ctl_sock_create(&net->ipv6.tcp_sk, PF_INET6, 2187 return inet_ctl_sock_create(&net->ipv6.tcp_sk, PF_INET6,
2180 SOCK_RAW, IPPROTO_TCP, net); 2188 SOCK_RAW, IPPROTO_TCP, net);
2181} 2189}
2182 2190
2183static void tcpv6_net_exit(struct net *net) 2191static void __net_exit tcpv6_net_exit(struct net *net)
2184{ 2192{
2185 inet_ctl_sock_destroy(net->ipv6.tcp_sk); 2193 inet_ctl_sock_destroy(net->ipv6.tcp_sk);
2186} 2194}
2187 2195
2188static void tcpv6_net_exit_batch(struct list_head *net_exit_list) 2196static void __net_exit tcpv6_net_exit_batch(struct list_head *net_exit_list)
2189{ 2197{
2190 inet_twsk_purge(&tcp_hashinfo, &tcp_death_row, AF_INET6); 2198 inet_twsk_purge(&tcp_hashinfo, &tcp_death_row, AF_INET6);
2191} 2199}
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index 69ebdbe78c4..34efb3589ff 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -1396,7 +1396,7 @@ static struct udp_seq_afinfo udp6_seq_afinfo = {
1396 }, 1396 },
1397}; 1397};
1398 1398
1399int udp6_proc_init(struct net *net) 1399int __net_init udp6_proc_init(struct net *net)
1400{ 1400{
1401 return udp_proc_register(net, &udp6_seq_afinfo); 1401 return udp_proc_register(net, &udp6_seq_afinfo);
1402} 1402}
diff --git a/net/ipv6/udplite.c b/net/ipv6/udplite.c
index 6ea6938919e..5f48fadc27f 100644
--- a/net/ipv6/udplite.c
+++ b/net/ipv6/udplite.c
@@ -104,12 +104,12 @@ static struct udp_seq_afinfo udplite6_seq_afinfo = {
104 }, 104 },
105}; 105};
106 106
107static int udplite6_proc_init_net(struct net *net) 107static int __net_init udplite6_proc_init_net(struct net *net)
108{ 108{
109 return udp_proc_register(net, &udplite6_seq_afinfo); 109 return udp_proc_register(net, &udplite6_seq_afinfo);
110} 110}
111 111
112static void udplite6_proc_exit_net(struct net *net) 112static void __net_exit udplite6_proc_exit_net(struct net *net)
113{ 113{
114 udp_proc_unregister(net, &udplite6_seq_afinfo); 114 udp_proc_unregister(net, &udplite6_seq_afinfo);
115} 115}
diff --git a/net/ipv6/xfrm6_tunnel.c b/net/ipv6/xfrm6_tunnel.c
index 438831d3359..d6f9aeec69f 100644
--- a/net/ipv6/xfrm6_tunnel.c
+++ b/net/ipv6/xfrm6_tunnel.c
@@ -30,6 +30,25 @@
30#include <linux/ipv6.h> 30#include <linux/ipv6.h>
31#include <linux/icmpv6.h> 31#include <linux/icmpv6.h>
32#include <linux/mutex.h> 32#include <linux/mutex.h>
33#include <net/netns/generic.h>
34
35#define XFRM6_TUNNEL_SPI_BYADDR_HSIZE 256
36#define XFRM6_TUNNEL_SPI_BYSPI_HSIZE 256
37
38#define XFRM6_TUNNEL_SPI_MIN 1
39#define XFRM6_TUNNEL_SPI_MAX 0xffffffff
40
41struct xfrm6_tunnel_net {
42 struct hlist_head spi_byaddr[XFRM6_TUNNEL_SPI_BYADDR_HSIZE];
43 struct hlist_head spi_byspi[XFRM6_TUNNEL_SPI_BYSPI_HSIZE];
44 u32 spi;
45};
46
47static int xfrm6_tunnel_net_id __read_mostly;
48static inline struct xfrm6_tunnel_net *xfrm6_tunnel_pernet(struct net *net)
49{
50 return net_generic(net, xfrm6_tunnel_net_id);
51}
33 52
34/* 53/*
35 * xfrm_tunnel_spi things are for allocating unique id ("spi") 54 * xfrm_tunnel_spi things are for allocating unique id ("spi")
@@ -46,19 +65,8 @@ struct xfrm6_tunnel_spi {
46 65
47static DEFINE_SPINLOCK(xfrm6_tunnel_spi_lock); 66static DEFINE_SPINLOCK(xfrm6_tunnel_spi_lock);
48 67
49static u32 xfrm6_tunnel_spi;
50
51#define XFRM6_TUNNEL_SPI_MIN 1
52#define XFRM6_TUNNEL_SPI_MAX 0xffffffff
53
54static struct kmem_cache *xfrm6_tunnel_spi_kmem __read_mostly; 68static struct kmem_cache *xfrm6_tunnel_spi_kmem __read_mostly;
55 69
56#define XFRM6_TUNNEL_SPI_BYADDR_HSIZE 256
57#define XFRM6_TUNNEL_SPI_BYSPI_HSIZE 256
58
59static struct hlist_head xfrm6_tunnel_spi_byaddr[XFRM6_TUNNEL_SPI_BYADDR_HSIZE];
60static struct hlist_head xfrm6_tunnel_spi_byspi[XFRM6_TUNNEL_SPI_BYSPI_HSIZE];
61
62static inline unsigned xfrm6_tunnel_spi_hash_byaddr(xfrm_address_t *addr) 70static inline unsigned xfrm6_tunnel_spi_hash_byaddr(xfrm_address_t *addr)
63{ 71{
64 unsigned h; 72 unsigned h;
@@ -77,49 +85,30 @@ static inline unsigned xfrm6_tunnel_spi_hash_byspi(u32 spi)
77} 85}
78 86
79 87
80static int xfrm6_tunnel_spi_init(void) 88static int __init xfrm6_tunnel_spi_init(void)
81{ 89{
82 int i;
83
84 xfrm6_tunnel_spi = 0;
85 xfrm6_tunnel_spi_kmem = kmem_cache_create("xfrm6_tunnel_spi", 90 xfrm6_tunnel_spi_kmem = kmem_cache_create("xfrm6_tunnel_spi",
86 sizeof(struct xfrm6_tunnel_spi), 91 sizeof(struct xfrm6_tunnel_spi),
87 0, SLAB_HWCACHE_ALIGN, 92 0, SLAB_HWCACHE_ALIGN,
88 NULL); 93 NULL);
89 if (!xfrm6_tunnel_spi_kmem) 94 if (!xfrm6_tunnel_spi_kmem)
90 return -ENOMEM; 95 return -ENOMEM;
91
92 for (i = 0; i < XFRM6_TUNNEL_SPI_BYADDR_HSIZE; i++)
93 INIT_HLIST_HEAD(&xfrm6_tunnel_spi_byaddr[i]);
94 for (i = 0; i < XFRM6_TUNNEL_SPI_BYSPI_HSIZE; i++)
95 INIT_HLIST_HEAD(&xfrm6_tunnel_spi_byspi[i]);
96 return 0; 96 return 0;
97} 97}
98 98
99static void xfrm6_tunnel_spi_fini(void) 99static void xfrm6_tunnel_spi_fini(void)
100{ 100{
101 int i;
102
103 for (i = 0; i < XFRM6_TUNNEL_SPI_BYADDR_HSIZE; i++) {
104 if (!hlist_empty(&xfrm6_tunnel_spi_byaddr[i]))
105 return;
106 }
107 for (i = 0; i < XFRM6_TUNNEL_SPI_BYSPI_HSIZE; i++) {
108 if (!hlist_empty(&xfrm6_tunnel_spi_byspi[i]))
109 return;
110 }
111 rcu_barrier();
112 kmem_cache_destroy(xfrm6_tunnel_spi_kmem); 101 kmem_cache_destroy(xfrm6_tunnel_spi_kmem);
113 xfrm6_tunnel_spi_kmem = NULL;
114} 102}
115 103
116static struct xfrm6_tunnel_spi *__xfrm6_tunnel_spi_lookup(xfrm_address_t *saddr) 104static struct xfrm6_tunnel_spi *__xfrm6_tunnel_spi_lookup(struct net *net, xfrm_address_t *saddr)
117{ 105{
106 struct xfrm6_tunnel_net *xfrm6_tn = xfrm6_tunnel_pernet(net);
118 struct xfrm6_tunnel_spi *x6spi; 107 struct xfrm6_tunnel_spi *x6spi;
119 struct hlist_node *pos; 108 struct hlist_node *pos;
120 109
121 hlist_for_each_entry_rcu(x6spi, pos, 110 hlist_for_each_entry_rcu(x6spi, pos,
122 &xfrm6_tunnel_spi_byaddr[xfrm6_tunnel_spi_hash_byaddr(saddr)], 111 &xfrm6_tn->spi_byaddr[xfrm6_tunnel_spi_hash_byaddr(saddr)],
123 list_byaddr) { 112 list_byaddr) {
124 if (memcmp(&x6spi->addr, saddr, sizeof(x6spi->addr)) == 0) 113 if (memcmp(&x6spi->addr, saddr, sizeof(x6spi->addr)) == 0)
125 return x6spi; 114 return x6spi;
@@ -128,13 +117,13 @@ static struct xfrm6_tunnel_spi *__xfrm6_tunnel_spi_lookup(xfrm_address_t *saddr)
128 return NULL; 117 return NULL;
129} 118}
130 119
131__be32 xfrm6_tunnel_spi_lookup(xfrm_address_t *saddr) 120__be32 xfrm6_tunnel_spi_lookup(struct net *net, xfrm_address_t *saddr)
132{ 121{
133 struct xfrm6_tunnel_spi *x6spi; 122 struct xfrm6_tunnel_spi *x6spi;
134 u32 spi; 123 u32 spi;
135 124
136 rcu_read_lock_bh(); 125 rcu_read_lock_bh();
137 x6spi = __xfrm6_tunnel_spi_lookup(saddr); 126 x6spi = __xfrm6_tunnel_spi_lookup(net, saddr);
138 spi = x6spi ? x6spi->spi : 0; 127 spi = x6spi ? x6spi->spi : 0;
139 rcu_read_unlock_bh(); 128 rcu_read_unlock_bh();
140 return htonl(spi); 129 return htonl(spi);
@@ -142,14 +131,15 @@ __be32 xfrm6_tunnel_spi_lookup(xfrm_address_t *saddr)
142 131
143EXPORT_SYMBOL(xfrm6_tunnel_spi_lookup); 132EXPORT_SYMBOL(xfrm6_tunnel_spi_lookup);
144 133
145static int __xfrm6_tunnel_spi_check(u32 spi) 134static int __xfrm6_tunnel_spi_check(struct net *net, u32 spi)
146{ 135{
136 struct xfrm6_tunnel_net *xfrm6_tn = xfrm6_tunnel_pernet(net);
147 struct xfrm6_tunnel_spi *x6spi; 137 struct xfrm6_tunnel_spi *x6spi;
148 int index = xfrm6_tunnel_spi_hash_byspi(spi); 138 int index = xfrm6_tunnel_spi_hash_byspi(spi);
149 struct hlist_node *pos; 139 struct hlist_node *pos;
150 140
151 hlist_for_each_entry(x6spi, pos, 141 hlist_for_each_entry(x6spi, pos,
152 &xfrm6_tunnel_spi_byspi[index], 142 &xfrm6_tn->spi_byspi[index],
153 list_byspi) { 143 list_byspi) {
154 if (x6spi->spi == spi) 144 if (x6spi->spi == spi)
155 return -1; 145 return -1;
@@ -157,32 +147,33 @@ static int __xfrm6_tunnel_spi_check(u32 spi)
157 return index; 147 return index;
158} 148}
159 149
160static u32 __xfrm6_tunnel_alloc_spi(xfrm_address_t *saddr) 150static u32 __xfrm6_tunnel_alloc_spi(struct net *net, xfrm_address_t *saddr)
161{ 151{
152 struct xfrm6_tunnel_net *xfrm6_tn = xfrm6_tunnel_pernet(net);
162 u32 spi; 153 u32 spi;
163 struct xfrm6_tunnel_spi *x6spi; 154 struct xfrm6_tunnel_spi *x6spi;
164 int index; 155 int index;
165 156
166 if (xfrm6_tunnel_spi < XFRM6_TUNNEL_SPI_MIN || 157 if (xfrm6_tn->spi < XFRM6_TUNNEL_SPI_MIN ||
167 xfrm6_tunnel_spi >= XFRM6_TUNNEL_SPI_MAX) 158 xfrm6_tn->spi >= XFRM6_TUNNEL_SPI_MAX)
168 xfrm6_tunnel_spi = XFRM6_TUNNEL_SPI_MIN; 159 xfrm6_tn->spi = XFRM6_TUNNEL_SPI_MIN;
169 else 160 else
170 xfrm6_tunnel_spi++; 161 xfrm6_tn->spi++;
171 162
172 for (spi = xfrm6_tunnel_spi; spi <= XFRM6_TUNNEL_SPI_MAX; spi++) { 163 for (spi = xfrm6_tn->spi; spi <= XFRM6_TUNNEL_SPI_MAX; spi++) {
173 index = __xfrm6_tunnel_spi_check(spi); 164 index = __xfrm6_tunnel_spi_check(net, spi);
174 if (index >= 0) 165 if (index >= 0)
175 goto alloc_spi; 166 goto alloc_spi;
176 } 167 }
177 for (spi = XFRM6_TUNNEL_SPI_MIN; spi < xfrm6_tunnel_spi; spi++) { 168 for (spi = XFRM6_TUNNEL_SPI_MIN; spi < xfrm6_tn->spi; spi++) {
178 index = __xfrm6_tunnel_spi_check(spi); 169 index = __xfrm6_tunnel_spi_check(net, spi);
179 if (index >= 0) 170 if (index >= 0)
180 goto alloc_spi; 171 goto alloc_spi;
181 } 172 }
182 spi = 0; 173 spi = 0;
183 goto out; 174 goto out;
184alloc_spi: 175alloc_spi:
185 xfrm6_tunnel_spi = spi; 176 xfrm6_tn->spi = spi;
186 x6spi = kmem_cache_alloc(xfrm6_tunnel_spi_kmem, GFP_ATOMIC); 177 x6spi = kmem_cache_alloc(xfrm6_tunnel_spi_kmem, GFP_ATOMIC);
187 if (!x6spi) 178 if (!x6spi)
188 goto out; 179 goto out;
@@ -192,26 +183,26 @@ alloc_spi:
192 x6spi->spi = spi; 183 x6spi->spi = spi;
193 atomic_set(&x6spi->refcnt, 1); 184 atomic_set(&x6spi->refcnt, 1);
194 185
195 hlist_add_head_rcu(&x6spi->list_byspi, &xfrm6_tunnel_spi_byspi[index]); 186 hlist_add_head_rcu(&x6spi->list_byspi, &xfrm6_tn->spi_byspi[index]);
196 187
197 index = xfrm6_tunnel_spi_hash_byaddr(saddr); 188 index = xfrm6_tunnel_spi_hash_byaddr(saddr);
198 hlist_add_head_rcu(&x6spi->list_byaddr, &xfrm6_tunnel_spi_byaddr[index]); 189 hlist_add_head_rcu(&x6spi->list_byaddr, &xfrm6_tn->spi_byaddr[index]);
199out: 190out:
200 return spi; 191 return spi;
201} 192}
202 193
203__be32 xfrm6_tunnel_alloc_spi(xfrm_address_t *saddr) 194__be32 xfrm6_tunnel_alloc_spi(struct net *net, xfrm_address_t *saddr)
204{ 195{
205 struct xfrm6_tunnel_spi *x6spi; 196 struct xfrm6_tunnel_spi *x6spi;
206 u32 spi; 197 u32 spi;
207 198
208 spin_lock_bh(&xfrm6_tunnel_spi_lock); 199 spin_lock_bh(&xfrm6_tunnel_spi_lock);
209 x6spi = __xfrm6_tunnel_spi_lookup(saddr); 200 x6spi = __xfrm6_tunnel_spi_lookup(net, saddr);
210 if (x6spi) { 201 if (x6spi) {
211 atomic_inc(&x6spi->refcnt); 202 atomic_inc(&x6spi->refcnt);
212 spi = x6spi->spi; 203 spi = x6spi->spi;
213 } else 204 } else
214 spi = __xfrm6_tunnel_alloc_spi(saddr); 205 spi = __xfrm6_tunnel_alloc_spi(net, saddr);
215 spin_unlock_bh(&xfrm6_tunnel_spi_lock); 206 spin_unlock_bh(&xfrm6_tunnel_spi_lock);
216 207
217 return htonl(spi); 208 return htonl(spi);
@@ -225,15 +216,16 @@ static void x6spi_destroy_rcu(struct rcu_head *head)
225 container_of(head, struct xfrm6_tunnel_spi, rcu_head)); 216 container_of(head, struct xfrm6_tunnel_spi, rcu_head));
226} 217}
227 218
228void xfrm6_tunnel_free_spi(xfrm_address_t *saddr) 219void xfrm6_tunnel_free_spi(struct net *net, xfrm_address_t *saddr)
229{ 220{
221 struct xfrm6_tunnel_net *xfrm6_tn = xfrm6_tunnel_pernet(net);
230 struct xfrm6_tunnel_spi *x6spi; 222 struct xfrm6_tunnel_spi *x6spi;
231 struct hlist_node *pos, *n; 223 struct hlist_node *pos, *n;
232 224
233 spin_lock_bh(&xfrm6_tunnel_spi_lock); 225 spin_lock_bh(&xfrm6_tunnel_spi_lock);
234 226
235 hlist_for_each_entry_safe(x6spi, pos, n, 227 hlist_for_each_entry_safe(x6spi, pos, n,
236 &xfrm6_tunnel_spi_byaddr[xfrm6_tunnel_spi_hash_byaddr(saddr)], 228 &xfrm6_tn->spi_byaddr[xfrm6_tunnel_spi_hash_byaddr(saddr)],
237 list_byaddr) 229 list_byaddr)
238 { 230 {
239 if (memcmp(&x6spi->addr, saddr, sizeof(x6spi->addr)) == 0) { 231 if (memcmp(&x6spi->addr, saddr, sizeof(x6spi->addr)) == 0) {
@@ -263,10 +255,11 @@ static int xfrm6_tunnel_input(struct xfrm_state *x, struct sk_buff *skb)
263 255
264static int xfrm6_tunnel_rcv(struct sk_buff *skb) 256static int xfrm6_tunnel_rcv(struct sk_buff *skb)
265{ 257{
258 struct net *net = dev_net(skb->dev);
266 struct ipv6hdr *iph = ipv6_hdr(skb); 259 struct ipv6hdr *iph = ipv6_hdr(skb);
267 __be32 spi; 260 __be32 spi;
268 261
269 spi = xfrm6_tunnel_spi_lookup((xfrm_address_t *)&iph->saddr); 262 spi = xfrm6_tunnel_spi_lookup(net, (xfrm_address_t *)&iph->saddr);
270 return xfrm6_rcv_spi(skb, IPPROTO_IPV6, spi) > 0 ? : 0; 263 return xfrm6_rcv_spi(skb, IPPROTO_IPV6, spi) > 0 ? : 0;
271} 264}
272 265
@@ -326,7 +319,9 @@ static int xfrm6_tunnel_init_state(struct xfrm_state *x)
326 319
327static void xfrm6_tunnel_destroy(struct xfrm_state *x) 320static void xfrm6_tunnel_destroy(struct xfrm_state *x)
328{ 321{
329 xfrm6_tunnel_free_spi((xfrm_address_t *)&x->props.saddr); 322 struct net *net = xs_net(x);
323
324 xfrm6_tunnel_free_spi(net, (xfrm_address_t *)&x->props.saddr);
330} 325}
331 326
332static const struct xfrm_type xfrm6_tunnel_type = { 327static const struct xfrm_type xfrm6_tunnel_type = {
@@ -351,18 +346,54 @@ static struct xfrm6_tunnel xfrm46_tunnel_handler = {
351 .priority = 2, 346 .priority = 2,
352}; 347};
353 348
349static int __net_init xfrm6_tunnel_net_init(struct net *net)
350{
351 struct xfrm6_tunnel_net *xfrm6_tn = xfrm6_tunnel_pernet(net);
352 unsigned int i;
353
354 for (i = 0; i < XFRM6_TUNNEL_SPI_BYADDR_HSIZE; i++)
355 INIT_HLIST_HEAD(&xfrm6_tn->spi_byaddr[i]);
356 for (i = 0; i < XFRM6_TUNNEL_SPI_BYSPI_HSIZE; i++)
357 INIT_HLIST_HEAD(&xfrm6_tn->spi_byspi[i]);
358 xfrm6_tn->spi = 0;
359
360 return 0;
361}
362
363static void __net_exit xfrm6_tunnel_net_exit(struct net *net)
364{
365}
366
367static struct pernet_operations xfrm6_tunnel_net_ops = {
368 .init = xfrm6_tunnel_net_init,
369 .exit = xfrm6_tunnel_net_exit,
370 .id = &xfrm6_tunnel_net_id,
371 .size = sizeof(struct xfrm6_tunnel_net),
372};
373
354static int __init xfrm6_tunnel_init(void) 374static int __init xfrm6_tunnel_init(void)
355{ 375{
356 if (xfrm_register_type(&xfrm6_tunnel_type, AF_INET6) < 0) 376 int rv;
377
378 rv = xfrm_register_type(&xfrm6_tunnel_type, AF_INET6);
379 if (rv < 0)
357 goto err; 380 goto err;
358 if (xfrm6_tunnel_register(&xfrm6_tunnel_handler, AF_INET6)) 381 rv = xfrm6_tunnel_register(&xfrm6_tunnel_handler, AF_INET6);
382 if (rv < 0)
359 goto unreg; 383 goto unreg;
360 if (xfrm6_tunnel_register(&xfrm46_tunnel_handler, AF_INET)) 384 rv = xfrm6_tunnel_register(&xfrm46_tunnel_handler, AF_INET);
385 if (rv < 0)
361 goto dereg6; 386 goto dereg6;
362 if (xfrm6_tunnel_spi_init() < 0) 387 rv = xfrm6_tunnel_spi_init();
388 if (rv < 0)
363 goto dereg46; 389 goto dereg46;
390 rv = register_pernet_subsys(&xfrm6_tunnel_net_ops);
391 if (rv < 0)
392 goto deregspi;
364 return 0; 393 return 0;
365 394
395deregspi:
396 xfrm6_tunnel_spi_fini();
366dereg46: 397dereg46:
367 xfrm6_tunnel_deregister(&xfrm46_tunnel_handler, AF_INET); 398 xfrm6_tunnel_deregister(&xfrm46_tunnel_handler, AF_INET);
368dereg6: 399dereg6:
@@ -370,11 +401,12 @@ dereg6:
370unreg: 401unreg:
371 xfrm_unregister_type(&xfrm6_tunnel_type, AF_INET6); 402 xfrm_unregister_type(&xfrm6_tunnel_type, AF_INET6);
372err: 403err:
373 return -EAGAIN; 404 return rv;
374} 405}
375 406
376static void __exit xfrm6_tunnel_fini(void) 407static void __exit xfrm6_tunnel_fini(void)
377{ 408{
409 unregister_pernet_subsys(&xfrm6_tunnel_net_ops);
378 xfrm6_tunnel_spi_fini(); 410 xfrm6_tunnel_spi_fini();
379 xfrm6_tunnel_deregister(&xfrm46_tunnel_handler, AF_INET); 411 xfrm6_tunnel_deregister(&xfrm46_tunnel_handler, AF_INET);
380 xfrm6_tunnel_deregister(&xfrm6_tunnel_handler, AF_INET6); 412 xfrm6_tunnel_deregister(&xfrm6_tunnel_handler, AF_INET6);
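Beyond moving the SPI hash tables into per-net state, xfrm6_tunnel_init() is reworked so the real error code of each registration is propagated (instead of a blanket -EAGAIN) and every step that succeeded is undone in reverse order when a later one fails. A self-contained sketch of that goto-unwind idiom, with hypothetical register_a/b/c facilities standing in for the xfrm registrations:

    /* Hypothetical facilities, stubbed so the sketch stands alone. */
    static int register_a(void)    { return 0; }
    static int register_b(void)    { return 0; }
    static int register_c(void)    { return 0; }
    static void unregister_a(void) { }
    static void unregister_b(void) { }

    static int example_init(void)
    {
            int rv;

            rv = register_a();
            if (rv < 0)
                    goto err;
            rv = register_b();
            if (rv < 0)
                    goto unreg_a;
            rv = register_c();
            if (rv < 0)
                    goto unreg_b;
            return 0;

    unreg_b:
            unregister_b();
    unreg_a:
            unregister_a();
    err:
            return rv;              /* the failing step's own error code */
    }

The matching module exit path, as in xfrm6_tunnel_fini() above, simply runs the same unwinding in full, newest registration first.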
diff --git a/net/irda/ircomm/ircomm_tty.c b/net/irda/ircomm/ircomm_tty.c
index 811984d9324..8b85d774e47 100644
--- a/net/irda/ircomm/ircomm_tty.c
+++ b/net/irda/ircomm/ircomm_tty.c
@@ -496,9 +496,6 @@ static void ircomm_tty_close(struct tty_struct *tty, struct file *filp)
496 496
497 IRDA_DEBUG(0, "%s()\n", __func__ ); 497 IRDA_DEBUG(0, "%s()\n", __func__ );
498 498
499 if (!tty)
500 return;
501
502 IRDA_ASSERT(self != NULL, return;); 499 IRDA_ASSERT(self != NULL, return;);
503 IRDA_ASSERT(self->magic == IRCOMM_TTY_MAGIC, return;); 500 IRDA_ASSERT(self->magic == IRCOMM_TTY_MAGIC, return;);
504 501
@@ -1007,9 +1004,6 @@ static void ircomm_tty_hangup(struct tty_struct *tty)
1007 IRDA_ASSERT(self != NULL, return;); 1004 IRDA_ASSERT(self != NULL, return;);
1008 IRDA_ASSERT(self->magic == IRCOMM_TTY_MAGIC, return;); 1005 IRDA_ASSERT(self->magic == IRCOMM_TTY_MAGIC, return;);
1009 1006
1010 if (!tty)
1011 return;
1012
1013 /* ircomm_tty_flush_buffer(tty); */ 1007 /* ircomm_tty_flush_buffer(tty); */
1014 ircomm_tty_shutdown(self); 1008 ircomm_tty_shutdown(self);
1015 1009
diff --git a/net/key/af_key.c b/net/key/af_key.c
index 539f43bc97d..41dd2cb07ef 100644
--- a/net/key/af_key.c
+++ b/net/key/af_key.c
@@ -3019,12 +3019,11 @@ static int pfkey_send_policy_notify(struct xfrm_policy *xp, int dir, struct km_e
3019static u32 get_acqseq(void) 3019static u32 get_acqseq(void)
3020{ 3020{
3021 u32 res; 3021 u32 res;
3022 static u32 acqseq; 3022 static atomic_t acqseq;
3023 static DEFINE_SPINLOCK(acqseq_lock);
3024 3023
3025 spin_lock_bh(&acqseq_lock); 3024 do {
3026 res = (++acqseq ? : ++acqseq); 3025 res = atomic_inc_return(&acqseq);
3027 spin_unlock_bh(&acqseq_lock); 3026 } while (!res);
3028 return res; 3027 return res;
3029} 3028}
3030 3029
@@ -3738,17 +3737,17 @@ static int __net_init pfkey_init_proc(struct net *net)
3738 return 0; 3737 return 0;
3739} 3738}
3740 3739
3741static void pfkey_exit_proc(struct net *net) 3740static void __net_exit pfkey_exit_proc(struct net *net)
3742{ 3741{
3743 proc_net_remove(net, "pfkey"); 3742 proc_net_remove(net, "pfkey");
3744} 3743}
3745#else 3744#else
3746static int __net_init pfkey_init_proc(struct net *net) 3745static inline int pfkey_init_proc(struct net *net)
3747{ 3746{
3748 return 0; 3747 return 0;
3749} 3748}
3750 3749
3751static void pfkey_exit_proc(struct net *net) 3750static inline void pfkey_exit_proc(struct net *net)
3752{ 3751{
3753} 3752}
3754#endif 3753#endif
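The get_acqseq() rewrite above drops the spinlock in favour of an atomic counter; the loop exists only to skip 0, which PF_KEY reserves to mean "no sequence number". The same idea in portable C11, since the kernel's atomic_inc_return() maps onto an atomic fetch-and-add:

    #include <stdatomic.h>
    #include <stdint.h>

    /* Lock-free, monotonically wrapping sequence number that never
     * returns the reserved value 0. */
    static uint32_t get_seq(void)
    {
            static _Atomic uint32_t seq;
            uint32_t res;

            do {
                    res = atomic_fetch_add(&seq, 1) + 1;
            } while (res == 0);     /* skip 0 on 32-bit wrap-around */

            return res;
    }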
diff --git a/net/llc/af_llc.c b/net/llc/af_llc.c
index 3a66546cad0..e35d907fba2 100644
--- a/net/llc/af_llc.c
+++ b/net/llc/af_llc.c
@@ -47,6 +47,10 @@ static int llc_ui_wait_for_busy_core(struct sock *sk, long timeout);
47#define dprintk(args...) 47#define dprintk(args...)
48#endif 48#endif
49 49
50/* Maybe we'll add some more in the future. */
51#define LLC_CMSG_PKTINFO 1
52
53
50/** 54/**
51 * llc_ui_next_link_no - return the next unused link number for a sap 55 * llc_ui_next_link_no - return the next unused link number for a sap
52 * @sap: Address of sap to get link number from. 56 * @sap: Address of sap to get link number from.
@@ -136,6 +140,7 @@ static struct proto llc_proto = {
136 .name = "LLC", 140 .name = "LLC",
137 .owner = THIS_MODULE, 141 .owner = THIS_MODULE,
138 .obj_size = sizeof(struct llc_sock), 142 .obj_size = sizeof(struct llc_sock),
143 .slab_flags = SLAB_DESTROY_BY_RCU,
139}; 144};
140 145
141/** 146/**
@@ -192,10 +197,8 @@ static int llc_ui_release(struct socket *sock)
192 llc->laddr.lsap, llc->daddr.lsap); 197 llc->laddr.lsap, llc->daddr.lsap);
193 if (!llc_send_disc(sk)) 198 if (!llc_send_disc(sk))
194 llc_ui_wait_for_disc(sk, sk->sk_rcvtimeo); 199 llc_ui_wait_for_disc(sk, sk->sk_rcvtimeo);
195 if (!sock_flag(sk, SOCK_ZAPPED)) { 200 if (!sock_flag(sk, SOCK_ZAPPED))
196 llc_sap_put(llc->sap);
197 llc_sap_remove_socket(llc->sap, sk); 201 llc_sap_remove_socket(llc->sap, sk);
198 }
199 release_sock(sk); 202 release_sock(sk);
200 if (llc->dev) 203 if (llc->dev)
201 dev_put(llc->dev); 204 dev_put(llc->dev);
@@ -255,7 +258,14 @@ static int llc_ui_autobind(struct socket *sock, struct sockaddr_llc *addr)
255 if (!sock_flag(sk, SOCK_ZAPPED)) 258 if (!sock_flag(sk, SOCK_ZAPPED))
256 goto out; 259 goto out;
257 rc = -ENODEV; 260 rc = -ENODEV;
258 llc->dev = dev_getfirstbyhwtype(&init_net, addr->sllc_arphrd); 261 if (sk->sk_bound_dev_if) {
262 llc->dev = dev_get_by_index(&init_net, sk->sk_bound_dev_if);
263 if (llc->dev && addr->sllc_arphrd != llc->dev->type) {
264 dev_put(llc->dev);
265 llc->dev = NULL;
266 }
267 } else
268 llc->dev = dev_getfirstbyhwtype(&init_net, addr->sllc_arphrd);
259 if (!llc->dev) 269 if (!llc->dev)
260 goto out; 270 goto out;
261 rc = -EUSERS; 271 rc = -EUSERS;
@@ -306,7 +316,25 @@ static int llc_ui_bind(struct socket *sock, struct sockaddr *uaddr, int addrlen)
306 goto out; 316 goto out;
307 rc = -ENODEV; 317 rc = -ENODEV;
308 rtnl_lock(); 318 rtnl_lock();
309 llc->dev = dev_getbyhwaddr(&init_net, addr->sllc_arphrd, addr->sllc_mac); 319 if (sk->sk_bound_dev_if) {
320 llc->dev = dev_get_by_index(&init_net, sk->sk_bound_dev_if);
321 if (llc->dev) {
322 if (!addr->sllc_arphrd)
323 addr->sllc_arphrd = llc->dev->type;
324 if (llc_mac_null(addr->sllc_mac))
325 memcpy(addr->sllc_mac, llc->dev->dev_addr,
326 IFHWADDRLEN);
327 if (addr->sllc_arphrd != llc->dev->type ||
328 !llc_mac_match(addr->sllc_mac,
329 llc->dev->dev_addr)) {
330 rc = -EINVAL;
331 dev_put(llc->dev);
332 llc->dev = NULL;
333 }
334 }
335 } else
336 llc->dev = dev_getbyhwaddr(&init_net, addr->sllc_arphrd,
337 addr->sllc_mac);
310 rtnl_unlock(); 338 rtnl_unlock();
311 if (!llc->dev) 339 if (!llc->dev)
312 goto out; 340 goto out;
@@ -322,7 +350,6 @@ static int llc_ui_bind(struct socket *sock, struct sockaddr *uaddr, int addrlen)
322 rc = -EBUSY; /* some other network layer is using the sap */ 350 rc = -EBUSY; /* some other network layer is using the sap */
323 if (!sap) 351 if (!sap)
324 goto out; 352 goto out;
325 llc_sap_hold(sap);
326 } else { 353 } else {
327 struct llc_addr laddr, daddr; 354 struct llc_addr laddr, daddr;
328 struct sock *ask; 355 struct sock *ask;
@@ -591,6 +618,20 @@ static int llc_wait_data(struct sock *sk, long timeo)
591 return rc; 618 return rc;
592} 619}
593 620
621static void llc_cmsg_rcv(struct msghdr *msg, struct sk_buff *skb)
622{
623 struct llc_sock *llc = llc_sk(skb->sk);
624
625 if (llc->cmsg_flags & LLC_CMSG_PKTINFO) {
626 struct llc_pktinfo info;
627
628 info.lpi_ifindex = llc_sk(skb->sk)->dev->ifindex;
629 llc_pdu_decode_dsap(skb, &info.lpi_sap);
630 llc_pdu_decode_da(skb, info.lpi_mac);
631 put_cmsg(msg, SOL_LLC, LLC_OPT_PKTINFO, sizeof(info), &info);
632 }
633}
634
594/** 635/**
595 * llc_ui_accept - accept a new incoming connection. 636 * llc_ui_accept - accept a new incoming connection.
596 * @sock: Socket which connections arrive on. 637 * @sock: Socket which connections arrive on.
@@ -812,6 +853,8 @@ copy_uaddr:
812 memcpy(uaddr, llc_ui_skb_cb(skb), sizeof(*uaddr)); 853 memcpy(uaddr, llc_ui_skb_cb(skb), sizeof(*uaddr));
813 msg->msg_namelen = sizeof(*uaddr); 854 msg->msg_namelen = sizeof(*uaddr);
814 } 855 }
856 if (llc_sk(sk)->cmsg_flags)
857 llc_cmsg_rcv(msg, skb);
815 goto out; 858 goto out;
816} 859}
817 860
@@ -1030,6 +1073,12 @@ static int llc_ui_setsockopt(struct socket *sock, int level, int optname,
1030 goto out; 1073 goto out;
1031 llc->rw = opt; 1074 llc->rw = opt;
1032 break; 1075 break;
1076 case LLC_OPT_PKTINFO:
1077 if (opt)
1078 llc->cmsg_flags |= LLC_CMSG_PKTINFO;
1079 else
1080 llc->cmsg_flags &= ~LLC_CMSG_PKTINFO;
1081 break;
1033 default: 1082 default:
1034 rc = -ENOPROTOOPT; 1083 rc = -ENOPROTOOPT;
1035 goto out; 1084 goto out;
@@ -1083,6 +1132,9 @@ static int llc_ui_getsockopt(struct socket *sock, int level, int optname,
1083 val = llc->k; break; 1132 val = llc->k; break;
1084 case LLC_OPT_RX_WIN: 1133 case LLC_OPT_RX_WIN:
1085 val = llc->rw; break; 1134 val = llc->rw; break;
1135 case LLC_OPT_PKTINFO:
1136 val = (llc->cmsg_flags & LLC_CMSG_PKTINFO) != 0;
1137 break;
1086 default: 1138 default:
1087 rc = -ENOPROTOOPT; 1139 rc = -ENOPROTOOPT;
1088 goto out; 1140 goto out;
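Two user-visible things change in af_llc.c: bind()/autobind now respect a socket already tied to an interface through SO_BINDTODEVICE (sk_bound_dev_if), falling back to the old hwtype/hwaddr lookup otherwise, and a new LLC_OPT_PKTINFO option delivers the receiving interface, DSAP and destination MAC as ancillary data. A hedged userspace sketch of consuming that cmsg; SOL_LLC is a real socket level, while LLC_OPT_PKTINFO and struct llc_pktinfo (lpi_ifindex/lpi_sap/lpi_mac) are assumed to come from the companion <linux/llc.h> update that is not part of this hunk:

    #include <sys/socket.h>
    #include <linux/llc.h>
    #include <stdio.h>
    #include <string.h>

    /* Walk the control messages of a recvmsg() result and print the
     * packet-info block, if the socket enabled LLC_OPT_PKTINFO. */
    static void handle_llc_pktinfo(struct msghdr *msg)
    {
            struct cmsghdr *cmsg;

            for (cmsg = CMSG_FIRSTHDR(msg); cmsg; cmsg = CMSG_NXTHDR(msg, cmsg)) {
                    struct llc_pktinfo info;

                    if (cmsg->cmsg_level != SOL_LLC ||
                        cmsg->cmsg_type != LLC_OPT_PKTINFO)
                            continue;

                    memcpy(&info, CMSG_DATA(cmsg), sizeof(info));
                    printf("ifindex=%d dsap=0x%02x\n",
                           info.lpi_ifindex, info.lpi_sap);
            }
    }

Enabling it mirrors the setsockopt hunk above: set LLC_OPT_PKTINFO to a non-zero value at level SOL_LLC before receiving.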
diff --git a/net/llc/llc_conn.c b/net/llc/llc_conn.c
index c6bab39b018..a8dde9b010d 100644
--- a/net/llc/llc_conn.c
+++ b/net/llc/llc_conn.c
@@ -468,6 +468,19 @@ static int llc_exec_conn_trans_actions(struct sock *sk,
468 return rc; 468 return rc;
469} 469}
470 470
471static inline bool llc_estab_match(const struct llc_sap *sap,
472 const struct llc_addr *daddr,
473 const struct llc_addr *laddr,
474 const struct sock *sk)
475{
476 struct llc_sock *llc = llc_sk(sk);
477
478 return llc->laddr.lsap == laddr->lsap &&
479 llc->daddr.lsap == daddr->lsap &&
480 llc_mac_match(llc->laddr.mac, laddr->mac) &&
481 llc_mac_match(llc->daddr.mac, daddr->mac);
482}
483
471/** 484/**
472 * __llc_lookup_established - Finds connection for the remote/local sap/mac 485 * __llc_lookup_established - Finds connection for the remote/local sap/mac
473 * @sap: SAP 486 * @sap: SAP
@@ -484,23 +497,35 @@ static struct sock *__llc_lookup_established(struct llc_sap *sap,
484 struct llc_addr *laddr) 497 struct llc_addr *laddr)
485{ 498{
486 struct sock *rc; 499 struct sock *rc;
487 struct hlist_node *node; 500 struct hlist_nulls_node *node;
488 501 int slot = llc_sk_laddr_hashfn(sap, laddr);
489 read_lock(&sap->sk_list.lock); 502 struct hlist_nulls_head *laddr_hb = &sap->sk_laddr_hash[slot];
490 sk_for_each(rc, node, &sap->sk_list.list) { 503
491 struct llc_sock *llc = llc_sk(rc); 504 rcu_read_lock();
492 505again:
493 if (llc->laddr.lsap == laddr->lsap && 506 sk_nulls_for_each_rcu(rc, node, laddr_hb) {
494 llc->daddr.lsap == daddr->lsap && 507 if (llc_estab_match(sap, daddr, laddr, rc)) {
495 llc_mac_match(llc->laddr.mac, laddr->mac) && 508 /* Extra checks required by SLAB_DESTROY_BY_RCU */
496 llc_mac_match(llc->daddr.mac, daddr->mac)) { 509 if (unlikely(!atomic_inc_not_zero(&rc->sk_refcnt)))
497 sock_hold(rc); 510 goto again;
511 if (unlikely(llc_sk(rc)->sap != sap ||
512 !llc_estab_match(sap, daddr, laddr, rc))) {
513 sock_put(rc);
514 continue;
515 }
498 goto found; 516 goto found;
499 } 517 }
500 } 518 }
501 rc = NULL; 519 rc = NULL;
520 /*
521 * if the nulls value we got at the end of this lookup is
522 * not the expected one, we must restart lookup.
523 * We probably met an item that was moved to another chain.
524 */
525 if (unlikely(get_nulls_value(node) != slot))
526 goto again;
502found: 527found:
503 read_unlock(&sap->sk_list.lock); 528 rcu_read_unlock();
504 return rc; 529 return rc;
505} 530}
506 531
@@ -516,6 +541,53 @@ struct sock *llc_lookup_established(struct llc_sap *sap,
516 return sk; 541 return sk;
517} 542}
518 543
544static inline bool llc_listener_match(const struct llc_sap *sap,
545 const struct llc_addr *laddr,
546 const struct sock *sk)
547{
548 struct llc_sock *llc = llc_sk(sk);
549
550 return sk->sk_type == SOCK_STREAM && sk->sk_state == TCP_LISTEN &&
551 llc->laddr.lsap == laddr->lsap &&
552 llc_mac_match(llc->laddr.mac, laddr->mac);
553}
554
555static struct sock *__llc_lookup_listener(struct llc_sap *sap,
556 struct llc_addr *laddr)
557{
558 struct sock *rc;
559 struct hlist_nulls_node *node;
560 int slot = llc_sk_laddr_hashfn(sap, laddr);
561 struct hlist_nulls_head *laddr_hb = &sap->sk_laddr_hash[slot];
562
563 rcu_read_lock();
564again:
565 sk_nulls_for_each_rcu(rc, node, laddr_hb) {
566 if (llc_listener_match(sap, laddr, rc)) {
567 /* Extra checks required by SLAB_DESTROY_BY_RCU */
568 if (unlikely(!atomic_inc_not_zero(&rc->sk_refcnt)))
569 goto again;
570 if (unlikely(llc_sk(rc)->sap != sap ||
571 !llc_listener_match(sap, laddr, rc))) {
572 sock_put(rc);
573 continue;
574 }
575 goto found;
576 }
577 }
578 rc = NULL;
579 /*
580 * if the nulls value we got at the end of this lookup is
581 * not the expected one, we must restart lookup.
582 * We probably met an item that was moved to another chain.
583 */
584 if (unlikely(get_nulls_value(node) != slot))
585 goto again;
586found:
587 rcu_read_unlock();
588 return rc;
589}
590
519/** 591/**
520 * llc_lookup_listener - Finds listener for local MAC + SAP 592 * llc_lookup_listener - Finds listener for local MAC + SAP
521 * @sap: SAP 593 * @sap: SAP
@@ -529,24 +601,12 @@ struct sock *llc_lookup_established(struct llc_sap *sap,
529static struct sock *llc_lookup_listener(struct llc_sap *sap, 601static struct sock *llc_lookup_listener(struct llc_sap *sap,
530 struct llc_addr *laddr) 602 struct llc_addr *laddr)
531{ 603{
532 struct sock *rc; 604 static struct llc_addr null_addr;
533 struct hlist_node *node; 605 struct sock *rc = __llc_lookup_listener(sap, laddr);
534 606
535 read_lock(&sap->sk_list.lock); 607 if (!rc)
536 sk_for_each(rc, node, &sap->sk_list.list) { 608 rc = __llc_lookup_listener(sap, &null_addr);
537 struct llc_sock *llc = llc_sk(rc);
538 609
539 if (rc->sk_type == SOCK_STREAM && rc->sk_state == TCP_LISTEN &&
540 llc->laddr.lsap == laddr->lsap &&
541 (llc_mac_match(llc->laddr.mac, laddr->mac) ||
542 llc_mac_null(llc->laddr.mac))) {
543 sock_hold(rc);
544 goto found;
545 }
546 }
547 rc = NULL;
548found:
549 read_unlock(&sap->sk_list.lock);
550 return rc; 610 return rc;
551} 611}
552 612
@@ -647,15 +707,22 @@ static int llc_find_offset(int state, int ev_type)
647 * @sap: SAP 707 * @sap: SAP
648 * @sk: socket 708 * @sk: socket
649 * 709 *
650 * This function adds a socket to sk_list of a SAP. 710 * This function adds a socket to the hash tables of a SAP.
651 */ 711 */
652void llc_sap_add_socket(struct llc_sap *sap, struct sock *sk) 712void llc_sap_add_socket(struct llc_sap *sap, struct sock *sk)
653{ 713{
714 struct llc_sock *llc = llc_sk(sk);
715 struct hlist_head *dev_hb = llc_sk_dev_hash(sap, llc->dev->ifindex);
716 struct hlist_nulls_head *laddr_hb = llc_sk_laddr_hash(sap, &llc->laddr);
717
654 llc_sap_hold(sap); 718 llc_sap_hold(sap);
655 write_lock_bh(&sap->sk_list.lock);
656 llc_sk(sk)->sap = sap; 719 llc_sk(sk)->sap = sap;
657 sk_add_node(sk, &sap->sk_list.list); 720
658 write_unlock_bh(&sap->sk_list.lock); 721 spin_lock_bh(&sap->sk_lock);
722 sap->sk_count++;
723 sk_nulls_add_node_rcu(sk, laddr_hb);
724 hlist_add_head(&llc->dev_hash_node, dev_hb);
725 spin_unlock_bh(&sap->sk_lock);
659} 726}
660 727
661/** 728/**
@@ -663,14 +730,18 @@ void llc_sap_add_socket(struct llc_sap *sap, struct sock *sk)
663 * @sap: SAP 730 * @sap: SAP
664 * @sk: socket 731 * @sk: socket
665 * 732 *
666 * This function removes a connection from sk_list.list of a SAP if 733 * This function removes a connection from the hash tables of a SAP if
667 * the connection was in this list. 734 * the connection was in this list.
668 */ 735 */
669void llc_sap_remove_socket(struct llc_sap *sap, struct sock *sk) 736void llc_sap_remove_socket(struct llc_sap *sap, struct sock *sk)
670{ 737{
671 write_lock_bh(&sap->sk_list.lock); 738 struct llc_sock *llc = llc_sk(sk);
672 sk_del_node_init(sk); 739
673 write_unlock_bh(&sap->sk_list.lock); 740 spin_lock_bh(&sap->sk_lock);
741 sk_nulls_del_node_init_rcu(sk);
742 hlist_del(&llc->dev_hash_node);
743 sap->sk_count--;
744 spin_unlock_bh(&sap->sk_lock);
674 llc_sap_put(sap); 745 llc_sap_put(sap);
675} 746}
676 747
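The llc_conn.c lookups above follow the standard SLAB_DESTROY_BY_RCU recipe that pairs with the new .slab_flags in llc_proto: match under rcu_read_lock(), pin the object only with atomic_inc_not_zero(), re-check the keys because the slab may have recycled the object for a different socket, and restart the walk if the nulls value at the end of the chain is not the expected bucket. A condensed sketch of the pattern with a hypothetical item/key type in place of struct sock:

    #include <linux/rculist_nulls.h>
    #include <linux/rcupdate.h>
    #include <linux/atomic.h>
    #include <linux/types.h>

    /* Hypothetical hashed item; the llc code uses struct sock instead. */
    struct item {
            struct hlist_nulls_node node;
            atomic_t                refcnt;
            u32                     key;
    };

    static void item_put(struct item *it)
    {
            /* sketch only: real code frees the object back to its
             * SLAB_DESTROY_BY_RCU cache once the count reaches zero */
            atomic_dec(&it->refcnt);
    }

    static struct item *item_lookup(struct hlist_nulls_head *head,
                                    unsigned int slot, u32 key)
    {
            struct hlist_nulls_node *pos;
            struct item *it;

            rcu_read_lock();
    again:
            hlist_nulls_for_each_entry_rcu(it, pos, head, node) {
                    if (it->key != key)
                            continue;
                    /* Only a 0 -> !0 transition pins a live object. */
                    if (unlikely(!atomic_inc_not_zero(&it->refcnt)))
                            goto again;
                    /* The slab may have reused it for another key. */
                    if (unlikely(it->key != key)) {
                            item_put(it);
                            continue;
                    }
                    rcu_read_unlock();
                    return it;
            }
            /* Ending on an unexpected nulls marker means the item moved
             * to another chain mid-walk: restart. */
            if (get_nulls_value(pos) != slot)
                    goto again;
            rcu_read_unlock();
            return NULL;
    }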
diff --git a/net/llc/llc_core.c b/net/llc/llc_core.c
index ff4c0ab96a6..78167e81dfe 100644
--- a/net/llc/llc_core.c
+++ b/net/llc/llc_core.c
@@ -23,7 +23,7 @@
23#include <net/llc.h> 23#include <net/llc.h>
24 24
25LIST_HEAD(llc_sap_list); 25LIST_HEAD(llc_sap_list);
26DEFINE_RWLOCK(llc_sap_list_lock); 26DEFINE_SPINLOCK(llc_sap_list_lock);
27 27
28/** 28/**
29 * llc_sap_alloc - allocates and initializes sap. 29 * llc_sap_alloc - allocates and initializes sap.
@@ -33,40 +33,19 @@ DEFINE_RWLOCK(llc_sap_list_lock);
33static struct llc_sap *llc_sap_alloc(void) 33static struct llc_sap *llc_sap_alloc(void)
34{ 34{
35 struct llc_sap *sap = kzalloc(sizeof(*sap), GFP_ATOMIC); 35 struct llc_sap *sap = kzalloc(sizeof(*sap), GFP_ATOMIC);
36 int i;
36 37
37 if (sap) { 38 if (sap) {
38 /* sap->laddr.mac - leave as a null, it's filled by bind */ 39 /* sap->laddr.mac - leave as a null, it's filled by bind */
39 sap->state = LLC_SAP_STATE_ACTIVE; 40 sap->state = LLC_SAP_STATE_ACTIVE;
40 rwlock_init(&sap->sk_list.lock); 41 spin_lock_init(&sap->sk_lock);
42 for (i = 0; i < LLC_SK_LADDR_HASH_ENTRIES; i++)
43 INIT_HLIST_NULLS_HEAD(&sap->sk_laddr_hash[i], i);
41 atomic_set(&sap->refcnt, 1); 44 atomic_set(&sap->refcnt, 1);
42 } 45 }
43 return sap; 46 return sap;
44} 47}
45 48
46/**
47 * llc_add_sap - add sap to station list
48 * @sap: Address of the sap
49 *
50 * Adds a sap to the LLC's station sap list.
51 */
52static void llc_add_sap(struct llc_sap *sap)
53{
54 list_add_tail(&sap->node, &llc_sap_list);
55}
56
57/**
58 * llc_del_sap - del sap from station list
59 * @sap: Address of the sap
60 *
61 * Removes a sap to the LLC's station sap list.
62 */
63static void llc_del_sap(struct llc_sap *sap)
64{
65 write_lock_bh(&llc_sap_list_lock);
66 list_del(&sap->node);
67 write_unlock_bh(&llc_sap_list_lock);
68}
69
70static struct llc_sap *__llc_sap_find(unsigned char sap_value) 49static struct llc_sap *__llc_sap_find(unsigned char sap_value)
71{ 50{
72 struct llc_sap* sap; 51 struct llc_sap* sap;
@@ -90,13 +69,13 @@ out:
90 */ 69 */
91struct llc_sap *llc_sap_find(unsigned char sap_value) 70struct llc_sap *llc_sap_find(unsigned char sap_value)
92{ 71{
93 struct llc_sap* sap; 72 struct llc_sap *sap;
94 73
95 read_lock_bh(&llc_sap_list_lock); 74 rcu_read_lock_bh();
96 sap = __llc_sap_find(sap_value); 75 sap = __llc_sap_find(sap_value);
97 if (sap) 76 if (sap)
98 llc_sap_hold(sap); 77 llc_sap_hold(sap);
99 read_unlock_bh(&llc_sap_list_lock); 78 rcu_read_unlock_bh();
100 return sap; 79 return sap;
101} 80}
102 81
@@ -117,7 +96,7 @@ struct llc_sap *llc_sap_open(unsigned char lsap,
117{ 96{
118 struct llc_sap *sap = NULL; 97 struct llc_sap *sap = NULL;
119 98
120 write_lock_bh(&llc_sap_list_lock); 99 spin_lock_bh(&llc_sap_list_lock);
121 if (__llc_sap_find(lsap)) /* SAP already exists */ 100 if (__llc_sap_find(lsap)) /* SAP already exists */
122 goto out; 101 goto out;
123 sap = llc_sap_alloc(); 102 sap = llc_sap_alloc();
@@ -125,9 +104,9 @@ struct llc_sap *llc_sap_open(unsigned char lsap,
125 goto out; 104 goto out;
126 sap->laddr.lsap = lsap; 105 sap->laddr.lsap = lsap;
127 sap->rcv_func = func; 106 sap->rcv_func = func;
128 llc_add_sap(sap); 107 list_add_tail_rcu(&sap->node, &llc_sap_list);
129out: 108out:
130 write_unlock_bh(&llc_sap_list_lock); 109 spin_unlock_bh(&llc_sap_list_lock);
131 return sap; 110 return sap;
132} 111}
133 112
@@ -142,8 +121,14 @@ out:
142 */ 121 */
143void llc_sap_close(struct llc_sap *sap) 122void llc_sap_close(struct llc_sap *sap)
144{ 123{
145 WARN_ON(!hlist_empty(&sap->sk_list.list)); 124 WARN_ON(sap->sk_count);
146 llc_del_sap(sap); 125
126 spin_lock_bh(&llc_sap_list_lock);
127 list_del_rcu(&sap->node);
128 spin_unlock_bh(&llc_sap_list_lock);
129
130 synchronize_rcu();
131
147 kfree(sap); 132 kfree(sap);
148} 133}
149 134
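llc_core.c converts the global SAP list from a reader/writer lock to RCU: lookups walk the list inside an RCU read-side section, writers serialize on a plain spinlock, and llc_sap_close() must wait out in-flight readers with synchronize_rcu() before freeing. The patch uses the _bh read-side variants because lookups also run from packet processing; a minimal sketch of the same publish/retire pattern with a hypothetical entry type:

    #include <linux/list.h>
    #include <linux/rcupdate.h>
    #include <linux/slab.h>
    #include <linux/spinlock.h>

    struct entry {
            struct list_head node;
            int              id;
    };

    static LIST_HEAD(entry_list);
    static DEFINE_SPINLOCK(entry_list_lock);        /* writers only */

    static struct entry *entry_find(int id)
    {
            struct entry *e;

            rcu_read_lock();
            list_for_each_entry_rcu(e, &entry_list, node) {
                    if (e->id == id) {
                            rcu_read_unlock();
                            return e;       /* real code takes a refcount here */
                    }
            }
            rcu_read_unlock();
            return NULL;
    }

    static void entry_add(struct entry *e)
    {
            spin_lock_bh(&entry_list_lock);
            list_add_tail_rcu(&e->node, &entry_list);
            spin_unlock_bh(&entry_list_lock);
    }

    static void entry_remove(struct entry *e)
    {
            spin_lock_bh(&entry_list_lock);
            list_del_rcu(&e->node);
            spin_unlock_bh(&entry_list_lock);

            synchronize_rcu();      /* wait for in-flight readers */
            kfree(e);
    }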
diff --git a/net/llc/llc_output.c b/net/llc/llc_output.c
index 754f4fedc85..b38a1079a98 100644
--- a/net/llc/llc_output.c
+++ b/net/llc/llc_output.c
@@ -33,48 +33,19 @@
33int llc_mac_hdr_init(struct sk_buff *skb, 33int llc_mac_hdr_init(struct sk_buff *skb,
34 const unsigned char *sa, const unsigned char *da) 34 const unsigned char *sa, const unsigned char *da)
35{ 35{
36 int rc = 0; 36 int rc = -EINVAL;
37 37
38 switch (skb->dev->type) { 38 switch (skb->dev->type) {
39#ifdef CONFIG_TR 39 case ARPHRD_IEEE802_TR:
40 case ARPHRD_IEEE802_TR: {
41 struct net_device *dev = skb->dev;
42 struct trh_hdr *trh;
43
44 skb_push(skb, sizeof(*trh));
45 skb_reset_mac_header(skb);
46 trh = tr_hdr(skb);
47 trh->ac = AC;
48 trh->fc = LLC_FRAME;
49 if (sa)
50 memcpy(trh->saddr, sa, dev->addr_len);
51 else
52 memset(trh->saddr, 0, dev->addr_len);
53 if (da) {
54 memcpy(trh->daddr, da, dev->addr_len);
55 tr_source_route(skb, trh, dev);
56 skb_reset_mac_header(skb);
57 }
58 break;
59 }
60#endif
61 case ARPHRD_ETHER: 40 case ARPHRD_ETHER:
62 case ARPHRD_LOOPBACK: { 41 case ARPHRD_LOOPBACK:
63 unsigned short len = skb->len; 42 rc = dev_hard_header(skb, skb->dev, ETH_P_802_2, da, sa,
64 struct ethhdr *eth; 43 skb->len);
65 44 if (rc > 0)
66 skb_push(skb, sizeof(*eth)); 45 rc = 0;
67 skb_reset_mac_header(skb);
68 eth = eth_hdr(skb);
69 eth->h_proto = htons(len);
70 memcpy(eth->h_dest, da, ETH_ALEN);
71 memcpy(eth->h_source, sa, ETH_ALEN);
72 break; 46 break;
73 }
74 default: 47 default:
75 printk(KERN_WARNING "device type not supported: %d\n", 48 WARN(1, "device type not supported: %d\n", skb->dev->type);
76 skb->dev->type);
77 rc = -EINVAL;
78 } 49 }
79 return rc; 50 return rc;
80} 51}
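llc_mac_hdr_init() no longer open-codes Ethernet and Token Ring headers; dev_hard_header() lets the device's header_ops build the link-layer header, which for 802.2 over Ethernet writes an 802.3 length field rather than an ethertype. The resulting call shape is roughly the following sketch (names other than dev_hard_header() and ETH_P_802_2 are illustrative):

    #include <linux/netdevice.h>
    #include <linux/skbuff.h>
    #include <linux/if_ether.h>

    /* Sketch: build the L2 header for an LLC PDU already queued in skb.
     * Assumes skb->dev is set and headroom was reserved by the caller. */
    static int build_llc_mac_header(struct sk_buff *skb,
                                    const unsigned char *sa,
                                    const unsigned char *da)
    {
            int rc = dev_hard_header(skb, skb->dev, ETH_P_802_2, da, sa,
                                     skb->len);

            return rc > 0 ? 0 : rc;         /* >0 is the header length */
    }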
diff --git a/net/llc/llc_proc.c b/net/llc/llc_proc.c
index be47ac427f6..7af1ff2d1f1 100644
--- a/net/llc/llc_proc.c
+++ b/net/llc/llc_proc.c
@@ -32,21 +32,23 @@ static void llc_ui_format_mac(struct seq_file *seq, u8 *addr)
32 32
33static struct sock *llc_get_sk_idx(loff_t pos) 33static struct sock *llc_get_sk_idx(loff_t pos)
34{ 34{
35 struct list_head *sap_entry;
36 struct llc_sap *sap; 35 struct llc_sap *sap;
37 struct hlist_node *node;
38 struct sock *sk = NULL; 36 struct sock *sk = NULL;
39 37 int i;
40 list_for_each(sap_entry, &llc_sap_list) { 38
41 sap = list_entry(sap_entry, struct llc_sap, node); 39 list_for_each_entry_rcu(sap, &llc_sap_list, node) {
42 40 spin_lock_bh(&sap->sk_lock);
43 read_lock_bh(&sap->sk_list.lock); 41 for (i = 0; i < LLC_SK_LADDR_HASH_ENTRIES; i++) {
44 sk_for_each(sk, node, &sap->sk_list.list) { 42 struct hlist_nulls_head *head = &sap->sk_laddr_hash[i];
45 if (!pos) 43 struct hlist_nulls_node *node;
46 goto found; 44
47 --pos; 45 sk_nulls_for_each(sk, node, head) {
46 if (!pos)
47 goto found; /* keep the lock */
48 --pos;
49 }
48 } 50 }
49 read_unlock_bh(&sap->sk_list.lock); 51 spin_unlock_bh(&sap->sk_lock);
50 } 52 }
51 sk = NULL; 53 sk = NULL;
52found: 54found:
@@ -57,10 +59,23 @@ static void *llc_seq_start(struct seq_file *seq, loff_t *pos)
57{ 59{
58 loff_t l = *pos; 60 loff_t l = *pos;
59 61
60 read_lock_bh(&llc_sap_list_lock); 62 rcu_read_lock_bh();
61 return l ? llc_get_sk_idx(--l) : SEQ_START_TOKEN; 63 return l ? llc_get_sk_idx(--l) : SEQ_START_TOKEN;
62} 64}
63 65
66static struct sock *laddr_hash_next(struct llc_sap *sap, int bucket)
67{
68 struct hlist_nulls_node *node;
69 struct sock *sk = NULL;
70
71 while (++bucket < LLC_SK_LADDR_HASH_ENTRIES)
72 sk_nulls_for_each(sk, node, &sap->sk_laddr_hash[bucket])
73 goto out;
74
75out:
76 return sk;
77}
78
64static void *llc_seq_next(struct seq_file *seq, void *v, loff_t *pos) 79static void *llc_seq_next(struct seq_file *seq, void *v, loff_t *pos)
65{ 80{
66 struct sock* sk, *next; 81 struct sock* sk, *next;
@@ -73,25 +88,23 @@ static void *llc_seq_next(struct seq_file *seq, void *v, loff_t *pos)
73 goto out; 88 goto out;
74 } 89 }
75 sk = v; 90 sk = v;
76 next = sk_next(sk); 91 next = sk_nulls_next(sk);
77 if (next) { 92 if (next) {
78 sk = next; 93 sk = next;
79 goto out; 94 goto out;
80 } 95 }
81 llc = llc_sk(sk); 96 llc = llc_sk(sk);
82 sap = llc->sap; 97 sap = llc->sap;
83 read_unlock_bh(&sap->sk_list.lock); 98 sk = laddr_hash_next(sap, llc_sk_laddr_hashfn(sap, &llc->laddr));
84 sk = NULL; 99 if (sk)
85 for (;;) { 100 goto out;
86 if (sap->node.next == &llc_sap_list) 101 spin_unlock_bh(&sap->sk_lock);
87 break; 102 list_for_each_entry_continue_rcu(sap, &llc_sap_list, node) {
88 sap = list_entry(sap->node.next, struct llc_sap, node); 103 spin_lock_bh(&sap->sk_lock);
89 read_lock_bh(&sap->sk_list.lock); 104 sk = laddr_hash_next(sap, -1);
90 if (!hlist_empty(&sap->sk_list.list)) { 105 if (sk)
91 sk = sk_head(&sap->sk_list.list); 106 break; /* keep the lock */
92 break; 107 spin_unlock_bh(&sap->sk_lock);
93 }
94 read_unlock_bh(&sap->sk_list.lock);
95 } 108 }
96out: 109out:
97 return sk; 110 return sk;
@@ -104,9 +117,9 @@ static void llc_seq_stop(struct seq_file *seq, void *v)
104 struct llc_sock *llc = llc_sk(sk); 117 struct llc_sock *llc = llc_sk(sk);
105 struct llc_sap *sap = llc->sap; 118 struct llc_sap *sap = llc->sap;
106 119
107 read_unlock_bh(&sap->sk_list.lock); 120 spin_unlock_bh(&sap->sk_lock);
108 } 121 }
109 read_unlock_bh(&llc_sap_list_lock); 122 rcu_read_unlock_bh();
110} 123}
111 124
112static int llc_seq_socket_show(struct seq_file *seq, void *v) 125static int llc_seq_socket_show(struct seq_file *seq, void *v)
diff --git a/net/llc/llc_sap.c b/net/llc/llc_sap.c
index 008de1fc42c..ad6e6e1cf22 100644
--- a/net/llc/llc_sap.c
+++ b/net/llc/llc_sap.c
@@ -297,6 +297,17 @@ static void llc_sap_rcv(struct llc_sap *sap, struct sk_buff *skb,
297 llc_sap_state_process(sap, skb); 297 llc_sap_state_process(sap, skb);
298} 298}
299 299
300static inline bool llc_dgram_match(const struct llc_sap *sap,
301 const struct llc_addr *laddr,
302 const struct sock *sk)
303{
304 struct llc_sock *llc = llc_sk(sk);
305
306 return sk->sk_type == SOCK_DGRAM &&
307 llc->laddr.lsap == laddr->lsap &&
308 llc_mac_match(llc->laddr.mac, laddr->mac);
309}
310
300/** 311/**
301 * llc_lookup_dgram - Finds dgram socket for the local sap/mac 312 * llc_lookup_dgram - Finds dgram socket for the local sap/mac
302 * @sap: SAP 313 * @sap: SAP
@@ -309,25 +320,68 @@ static struct sock *llc_lookup_dgram(struct llc_sap *sap,
309 const struct llc_addr *laddr) 320 const struct llc_addr *laddr)
310{ 321{
311 struct sock *rc; 322 struct sock *rc;
312 struct hlist_node *node; 323 struct hlist_nulls_node *node;
313 324 int slot = llc_sk_laddr_hashfn(sap, laddr);
314 read_lock_bh(&sap->sk_list.lock); 325 struct hlist_nulls_head *laddr_hb = &sap->sk_laddr_hash[slot];
315 sk_for_each(rc, node, &sap->sk_list.list) { 326
316 struct llc_sock *llc = llc_sk(rc); 327 rcu_read_lock_bh();
317 328again:
318 if (rc->sk_type == SOCK_DGRAM && 329 sk_nulls_for_each_rcu(rc, node, laddr_hb) {
319 llc->laddr.lsap == laddr->lsap && 330 if (llc_dgram_match(sap, laddr, rc)) {
320 llc_mac_match(llc->laddr.mac, laddr->mac)) { 331 /* Extra checks required by SLAB_DESTROY_BY_RCU */
321 sock_hold(rc); 332 if (unlikely(!atomic_inc_not_zero(&rc->sk_refcnt)))
333 goto again;
334 if (unlikely(llc_sk(rc)->sap != sap ||
335 !llc_dgram_match(sap, laddr, rc))) {
336 sock_put(rc);
337 continue;
338 }
322 goto found; 339 goto found;
323 } 340 }
324 } 341 }
325 rc = NULL; 342 rc = NULL;
343 /*
344 * if the nulls value we got at the end of this lookup is
345 * not the expected one, we must restart lookup.
346 * We probably met an item that was moved to another chain.
347 */
348 if (unlikely(get_nulls_value(node) != slot))
349 goto again;
326found: 350found:
327 read_unlock_bh(&sap->sk_list.lock); 351 rcu_read_unlock_bh();
328 return rc; 352 return rc;
329} 353}
330 354
355static inline bool llc_mcast_match(const struct llc_sap *sap,
356 const struct llc_addr *laddr,
357 const struct sk_buff *skb,
358 const struct sock *sk)
359{
360 struct llc_sock *llc = llc_sk(sk);
361
362 return sk->sk_type == SOCK_DGRAM &&
363 llc->laddr.lsap == laddr->lsap &&
364 llc->dev == skb->dev;
365}
366
367static void llc_do_mcast(struct llc_sap *sap, struct sk_buff *skb,
368 struct sock **stack, int count)
369{
370 struct sk_buff *skb1;
371 int i;
372
373 for (i = 0; i < count; i++) {
374 skb1 = skb_clone(skb, GFP_ATOMIC);
375 if (!skb1) {
376 sock_put(stack[i]);
377 continue;
378 }
379
380 llc_sap_rcv(sap, skb1, stack[i]);
381 sock_put(stack[i]);
382 }
383}
384
331/** 385/**
332 * llc_sap_mcast - Deliver multicast PDU's to all matching datagram sockets. 386 * llc_sap_mcast - Deliver multicast PDU's to all matching datagram sockets.
333 * @sap: SAP 387 * @sap: SAP
@@ -340,32 +394,31 @@ static void llc_sap_mcast(struct llc_sap *sap,
340 const struct llc_addr *laddr, 394 const struct llc_addr *laddr,
341 struct sk_buff *skb) 395 struct sk_buff *skb)
342{ 396{
343 struct sock *sk; 397 int i = 0, count = 256 / sizeof(struct sock *);
398 struct sock *sk, *stack[count];
344 struct hlist_node *node; 399 struct hlist_node *node;
400 struct llc_sock *llc;
401 struct hlist_head *dev_hb = llc_sk_dev_hash(sap, skb->dev->ifindex);
345 402
346 read_lock_bh(&sap->sk_list.lock); 403 spin_lock_bh(&sap->sk_lock);
347 sk_for_each(sk, node, &sap->sk_list.list) { 404 hlist_for_each_entry(llc, node, dev_hb, dev_hash_node) {
348 struct llc_sock *llc = llc_sk(sk);
349 struct sk_buff *skb1;
350 405
351 if (sk->sk_type != SOCK_DGRAM) 406 sk = &llc->sk;
352 continue;
353 407
354 if (llc->laddr.lsap != laddr->lsap) 408 if (!llc_mcast_match(sap, laddr, skb, sk))
355 continue; 409 continue;
356 410
357 if (llc->dev != skb->dev)
358 continue;
359
360 skb1 = skb_clone(skb, GFP_ATOMIC);
361 if (!skb1)
362 break;
363
364 sock_hold(sk); 411 sock_hold(sk);
365 llc_sap_rcv(sap, skb1, sk); 412 if (i < count)
366 sock_put(sk); 413 stack[i++] = sk;
414 else {
415 llc_do_mcast(sap, skb, stack, i);
416 i = 0;
417 }
367 } 418 }
368 read_unlock_bh(&sap->sk_list.lock); 419 spin_unlock_bh(&sap->sk_lock);
420
421 llc_do_mcast(sap, skb, stack, i);
369} 422}
370 423
371 424
diff --git a/net/mac80211/Kconfig b/net/mac80211/Kconfig
index a10d508b07e..a952b7f8c64 100644
--- a/net/mac80211/Kconfig
+++ b/net/mac80211/Kconfig
@@ -96,18 +96,6 @@ menuconfig MAC80211_DEBUG_MENU
96 ---help--- 96 ---help---
97 This option collects various mac80211 debug settings. 97 This option collects various mac80211 debug settings.
98 98
99config MAC80211_DEBUG_PACKET_ALIGNMENT
100 bool "Enable packet alignment debugging"
101 depends on MAC80211_DEBUG_MENU
102 ---help---
103 This option is recommended for driver authors and strongly
104 discouraged for everybody else, it will trigger a warning
105 when a driver hands mac80211 a buffer that is aligned in
106 a way that will cause problems with the IP stack on some
107 architectures.
108
109 Say N unless you're writing a mac80211 based driver.
110
111config MAC80211_NOINLINE 99config MAC80211_NOINLINE
112 bool "Do not inline TX/RX handlers" 100 bool "Do not inline TX/RX handlers"
113 depends on MAC80211_DEBUG_MENU 101 depends on MAC80211_DEBUG_MENU
diff --git a/net/mac80211/Makefile b/net/mac80211/Makefile
index 298cfcc1bf8..04420291e7a 100644
--- a/net/mac80211/Makefile
+++ b/net/mac80211/Makefile
@@ -6,10 +6,10 @@ mac80211-y := \
6 sta_info.o \ 6 sta_info.o \
7 wep.o \ 7 wep.o \
8 wpa.o \ 8 wpa.o \
9 scan.o \ 9 scan.o offchannel.o \
10 ht.o agg-tx.o agg-rx.o \ 10 ht.o agg-tx.o agg-rx.o \
11 ibss.o \ 11 ibss.o \
12 mlme.o \ 12 mlme.o work.o \
13 iface.o \ 13 iface.o \
14 rate.o \ 14 rate.o \
15 michael.o \ 15 michael.o \
diff --git a/net/mac80211/agg-rx.c b/net/mac80211/agg-rx.c
index 51c7dc3c4c3..a978e666ed6 100644
--- a/net/mac80211/agg-rx.c
+++ b/net/mac80211/agg-rx.c
@@ -41,8 +41,7 @@ void __ieee80211_stop_rx_ba_session(struct sta_info *sta, u16 tid,
41 sta->sta.addr, tid); 41 sta->sta.addr, tid);
42#endif /* CONFIG_MAC80211_HT_DEBUG */ 42#endif /* CONFIG_MAC80211_HT_DEBUG */
43 43
44 if (drv_ampdu_action(local, &sta->sdata->vif, 44 if (drv_ampdu_action(local, sta->sdata, IEEE80211_AMPDU_RX_STOP,
45 IEEE80211_AMPDU_RX_STOP,
46 &sta->sta, tid, NULL)) 45 &sta->sta, tid, NULL))
47 printk(KERN_DEBUG "HW problem - can not stop rx " 46 printk(KERN_DEBUG "HW problem - can not stop rx "
48 "aggregation for tid %d\n", tid); 47 "aggregation for tid %d\n", tid);
@@ -83,12 +82,11 @@ void __ieee80211_stop_rx_ba_session(struct sta_info *sta, u16 tid,
83void ieee80211_sta_stop_rx_ba_session(struct ieee80211_sub_if_data *sdata, u8 *ra, u16 tid, 82void ieee80211_sta_stop_rx_ba_session(struct ieee80211_sub_if_data *sdata, u8 *ra, u16 tid,
84 u16 initiator, u16 reason) 83 u16 initiator, u16 reason)
85{ 84{
86 struct ieee80211_local *local = sdata->local;
87 struct sta_info *sta; 85 struct sta_info *sta;
88 86
89 rcu_read_lock(); 87 rcu_read_lock();
90 88
91 sta = sta_info_get(local, ra); 89 sta = sta_info_get(sdata, ra);
92 if (!sta) { 90 if (!sta) {
93 rcu_read_unlock(); 91 rcu_read_unlock();
94 return; 92 return;
@@ -136,7 +134,7 @@ static void ieee80211_send_addba_resp(struct ieee80211_sub_if_data *sdata, u8 *d
136 134
137 if (!skb) { 135 if (!skb) {
138 printk(KERN_DEBUG "%s: failed to allocate buffer " 136 printk(KERN_DEBUG "%s: failed to allocate buffer "
139 "for addba resp frame\n", sdata->dev->name); 137 "for addba resp frame\n", sdata->name);
140 return; 138 return;
141 } 139 }
142 140
@@ -144,10 +142,10 @@ static void ieee80211_send_addba_resp(struct ieee80211_sub_if_data *sdata, u8 *d
144 mgmt = (struct ieee80211_mgmt *) skb_put(skb, 24); 142 mgmt = (struct ieee80211_mgmt *) skb_put(skb, 24);
145 memset(mgmt, 0, 24); 143 memset(mgmt, 0, 24);
146 memcpy(mgmt->da, da, ETH_ALEN); 144 memcpy(mgmt->da, da, ETH_ALEN);
147 memcpy(mgmt->sa, sdata->dev->dev_addr, ETH_ALEN); 145 memcpy(mgmt->sa, sdata->vif.addr, ETH_ALEN);
148 if (sdata->vif.type == NL80211_IFTYPE_AP || 146 if (sdata->vif.type == NL80211_IFTYPE_AP ||
149 sdata->vif.type == NL80211_IFTYPE_AP_VLAN) 147 sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
150 memcpy(mgmt->bssid, sdata->dev->dev_addr, ETH_ALEN); 148 memcpy(mgmt->bssid, sdata->vif.addr, ETH_ALEN);
151 else if (sdata->vif.type == NL80211_IFTYPE_STATION) 149 else if (sdata->vif.type == NL80211_IFTYPE_STATION)
152 memcpy(mgmt->bssid, sdata->u.mgd.bssid, ETH_ALEN); 150 memcpy(mgmt->bssid, sdata->u.mgd.bssid, ETH_ALEN);
153 151
@@ -281,8 +279,7 @@ void ieee80211_process_addba_request(struct ieee80211_local *local,
281 goto end; 279 goto end;
282 } 280 }
283 281
284 ret = drv_ampdu_action(local, &sta->sdata->vif, 282 ret = drv_ampdu_action(local, sta->sdata, IEEE80211_AMPDU_RX_START,
285 IEEE80211_AMPDU_RX_START,
286 &sta->sta, tid, &start_seq_num); 283 &sta->sta, tid, &start_seq_num);
287#ifdef CONFIG_MAC80211_HT_DEBUG 284#ifdef CONFIG_MAC80211_HT_DEBUG
288 printk(KERN_DEBUG "Rx A-MPDU request on tid %d result %d\n", tid, ret); 285 printk(KERN_DEBUG "Rx A-MPDU request on tid %d result %d\n", tid, ret);
diff --git a/net/mac80211/agg-tx.c b/net/mac80211/agg-tx.c
index 5e3a7eccef5..718fbcff84d 100644
--- a/net/mac80211/agg-tx.c
+++ b/net/mac80211/agg-tx.c
@@ -58,17 +58,17 @@ static void ieee80211_send_addba_request(struct ieee80211_sub_if_data *sdata,
58 58
59 if (!skb) { 59 if (!skb) {
60 printk(KERN_ERR "%s: failed to allocate buffer " 60 printk(KERN_ERR "%s: failed to allocate buffer "
61 "for addba request frame\n", sdata->dev->name); 61 "for addba request frame\n", sdata->name);
62 return; 62 return;
63 } 63 }
64 skb_reserve(skb, local->hw.extra_tx_headroom); 64 skb_reserve(skb, local->hw.extra_tx_headroom);
65 mgmt = (struct ieee80211_mgmt *) skb_put(skb, 24); 65 mgmt = (struct ieee80211_mgmt *) skb_put(skb, 24);
66 memset(mgmt, 0, 24); 66 memset(mgmt, 0, 24);
67 memcpy(mgmt->da, da, ETH_ALEN); 67 memcpy(mgmt->da, da, ETH_ALEN);
68 memcpy(mgmt->sa, sdata->dev->dev_addr, ETH_ALEN); 68 memcpy(mgmt->sa, sdata->vif.addr, ETH_ALEN);
69 if (sdata->vif.type == NL80211_IFTYPE_AP || 69 if (sdata->vif.type == NL80211_IFTYPE_AP ||
70 sdata->vif.type == NL80211_IFTYPE_AP_VLAN) 70 sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
71 memcpy(mgmt->bssid, sdata->dev->dev_addr, ETH_ALEN); 71 memcpy(mgmt->bssid, sdata->vif.addr, ETH_ALEN);
72 else if (sdata->vif.type == NL80211_IFTYPE_STATION) 72 else if (sdata->vif.type == NL80211_IFTYPE_STATION)
73 memcpy(mgmt->bssid, sdata->u.mgd.bssid, ETH_ALEN); 73 memcpy(mgmt->bssid, sdata->u.mgd.bssid, ETH_ALEN);
74 74
@@ -104,7 +104,7 @@ void ieee80211_send_bar(struct ieee80211_sub_if_data *sdata, u8 *ra, u16 tid, u1
104 skb = dev_alloc_skb(sizeof(*bar) + local->hw.extra_tx_headroom); 104 skb = dev_alloc_skb(sizeof(*bar) + local->hw.extra_tx_headroom);
105 if (!skb) { 105 if (!skb) {
106 printk(KERN_ERR "%s: failed to allocate buffer for " 106 printk(KERN_ERR "%s: failed to allocate buffer for "
107 "bar frame\n", sdata->dev->name); 107 "bar frame\n", sdata->name);
108 return; 108 return;
109 } 109 }
110 skb_reserve(skb, local->hw.extra_tx_headroom); 110 skb_reserve(skb, local->hw.extra_tx_headroom);
@@ -113,7 +113,7 @@ void ieee80211_send_bar(struct ieee80211_sub_if_data *sdata, u8 *ra, u16 tid, u1
113 bar->frame_control = cpu_to_le16(IEEE80211_FTYPE_CTL | 113 bar->frame_control = cpu_to_le16(IEEE80211_FTYPE_CTL |
114 IEEE80211_STYPE_BACK_REQ); 114 IEEE80211_STYPE_BACK_REQ);
115 memcpy(bar->ra, ra, ETH_ALEN); 115 memcpy(bar->ra, ra, ETH_ALEN);
116 memcpy(bar->ta, sdata->dev->dev_addr, ETH_ALEN); 116 memcpy(bar->ta, sdata->vif.addr, ETH_ALEN);
117 bar_control |= (u16)IEEE80211_BAR_CTRL_ACK_POLICY_NORMAL; 117 bar_control |= (u16)IEEE80211_BAR_CTRL_ACK_POLICY_NORMAL;
118 bar_control |= (u16)IEEE80211_BAR_CTRL_CBMTID_COMPRESSED_BA; 118 bar_control |= (u16)IEEE80211_BAR_CTRL_CBMTID_COMPRESSED_BA;
119 bar_control |= (u16)(tid << 12); 119 bar_control |= (u16)(tid << 12);
@@ -144,7 +144,7 @@ int ___ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid,
144 *state = HT_AGG_STATE_REQ_STOP_BA_MSK | 144 *state = HT_AGG_STATE_REQ_STOP_BA_MSK |
145 (initiator << HT_AGG_STATE_INITIATOR_SHIFT); 145 (initiator << HT_AGG_STATE_INITIATOR_SHIFT);
146 146
147 ret = drv_ampdu_action(local, &sta->sdata->vif, 147 ret = drv_ampdu_action(local, sta->sdata,
148 IEEE80211_AMPDU_TX_STOP, 148 IEEE80211_AMPDU_TX_STOP,
149 &sta->sta, tid, NULL); 149 &sta->sta, tid, NULL);
150 150
@@ -179,7 +179,8 @@ static void sta_addba_resp_timer_expired(unsigned long data)
179 179
180 /* check if the TID waits for addBA response */ 180 /* check if the TID waits for addBA response */
181 spin_lock_bh(&sta->lock); 181 spin_lock_bh(&sta->lock);
182 if ((*state & (HT_ADDBA_REQUESTED_MSK | HT_ADDBA_RECEIVED_MSK)) != 182 if ((*state & (HT_ADDBA_REQUESTED_MSK | HT_ADDBA_RECEIVED_MSK |
183 HT_AGG_STATE_REQ_STOP_BA_MSK)) !=
183 HT_ADDBA_REQUESTED_MSK) { 184 HT_ADDBA_REQUESTED_MSK) {
184 spin_unlock_bh(&sta->lock); 185 spin_unlock_bh(&sta->lock);
185 *state = HT_AGG_STATE_IDLE; 186 *state = HT_AGG_STATE_IDLE;
@@ -301,10 +302,9 @@ int ieee80211_start_tx_ba_session(struct ieee80211_sta *pubsta, u16 tid)
301 * call back right away, it must see that the flow has begun */ 302 * call back right away, it must see that the flow has begun */
302 *state |= HT_ADDBA_REQUESTED_MSK; 303 *state |= HT_ADDBA_REQUESTED_MSK;
303 304
304 start_seq_num = sta->tid_seq[tid]; 305 start_seq_num = sta->tid_seq[tid] >> 4;
305 306
306 ret = drv_ampdu_action(local, &sdata->vif, 307 ret = drv_ampdu_action(local, sdata, IEEE80211_AMPDU_TX_START,
307 IEEE80211_AMPDU_TX_START,
308 pubsta, tid, &start_seq_num); 308 pubsta, tid, &start_seq_num);
309 309
310 if (ret) { 310 if (ret) {
@@ -420,7 +420,7 @@ static void ieee80211_agg_tx_operational(struct ieee80211_local *local,
420 ieee80211_agg_splice_finish(local, sta, tid); 420 ieee80211_agg_splice_finish(local, sta, tid);
421 spin_unlock(&local->ampdu_lock); 421 spin_unlock(&local->ampdu_lock);
422 422
423 drv_ampdu_action(local, &sta->sdata->vif, 423 drv_ampdu_action(local, sta->sdata,
424 IEEE80211_AMPDU_TX_OPERATIONAL, 424 IEEE80211_AMPDU_TX_OPERATIONAL,
425 &sta->sta, tid, NULL); 425 &sta->sta, tid, NULL);
426} 426}
@@ -441,7 +441,7 @@ void ieee80211_start_tx_ba_cb(struct ieee80211_vif *vif, u8 *ra, u16 tid)
441 } 441 }
442 442
443 rcu_read_lock(); 443 rcu_read_lock();
444 sta = sta_info_get(local, ra); 444 sta = sta_info_get(sdata, ra);
445 if (!sta) { 445 if (!sta) {
446 rcu_read_unlock(); 446 rcu_read_unlock();
447#ifdef CONFIG_MAC80211_HT_DEBUG 447#ifdef CONFIG_MAC80211_HT_DEBUG
@@ -489,7 +489,7 @@ void ieee80211_start_tx_ba_cb_irqsafe(struct ieee80211_vif *vif,
489#ifdef CONFIG_MAC80211_HT_DEBUG 489#ifdef CONFIG_MAC80211_HT_DEBUG
490 if (net_ratelimit()) 490 if (net_ratelimit())
491 printk(KERN_WARNING "%s: Not enough memory, " 491 printk(KERN_WARNING "%s: Not enough memory, "
492 "dropping start BA session", skb->dev->name); 492 "dropping start BA session", sdata->name);
493#endif 493#endif
494 return; 494 return;
495 } 495 }
@@ -564,7 +564,7 @@ void ieee80211_stop_tx_ba_cb(struct ieee80211_vif *vif, u8 *ra, u8 tid)
564#endif /* CONFIG_MAC80211_HT_DEBUG */ 564#endif /* CONFIG_MAC80211_HT_DEBUG */
565 565
566 rcu_read_lock(); 566 rcu_read_lock();
567 sta = sta_info_get(local, ra); 567 sta = sta_info_get(sdata, ra);
568 if (!sta) { 568 if (!sta) {
569#ifdef CONFIG_MAC80211_HT_DEBUG 569#ifdef CONFIG_MAC80211_HT_DEBUG
570 printk(KERN_DEBUG "Could not find station: %pM\n", ra); 570 printk(KERN_DEBUG "Could not find station: %pM\n", ra);
@@ -621,7 +621,7 @@ void ieee80211_stop_tx_ba_cb_irqsafe(struct ieee80211_vif *vif,
621#ifdef CONFIG_MAC80211_HT_DEBUG 621#ifdef CONFIG_MAC80211_HT_DEBUG
622 if (net_ratelimit()) 622 if (net_ratelimit())
623 printk(KERN_WARNING "%s: Not enough memory, " 623 printk(KERN_WARNING "%s: Not enough memory, "
624 "dropping stop BA session", skb->dev->name); 624 "dropping stop BA session", sdata->name);
625#endif 625#endif
626 return; 626 return;
627 } 627 }
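
A minimal, self-contained C sketch (not part of the patch) modeling the indirection the agg-tx.c hunks above rely on: the drv_* wrappers now receive the sub-interface data and pass only the embedded vif down to the driver callback. All struct and function names here are illustrative stand-ins, not mac80211's.

#include <stdio.h>

struct vif { int type; };
struct sub_if_data { const char *name; struct vif vif; };

/* the "driver" callback sees only the vif */
static int driver_ampdu_action(struct vif *vif, int action)
{
        printf("driver: vif type %d, action %d\n", vif->type, action);
        return 0;
}

/* the wrapper takes the richer sub-interface and hands down &sdata->vif */
static int drv_ampdu_action(struct sub_if_data *sdata, int action)
{
        int ret = driver_ampdu_action(&sdata->vif, action);

        printf("trace: %s action %d ret %d\n", sdata->name, action, ret);
        return ret;
}

int main(void)
{
        struct sub_if_data sdata = { .name = "wlan0", .vif = { .type = 2 } };

        return drv_ampdu_action(&sdata, 0 /* e.g. TX_START */);
}

The point of the indirection is that the wrapper (and its tracepoint) keeps access to sub-interface state such as the name, while the driver API stays vif-based.
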
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
index 9ae1a4760b5..facf233843e 100644
--- a/net/mac80211/cfg.c
+++ b/net/mac80211/cfg.c
@@ -78,17 +78,15 @@ static int ieee80211_change_iface(struct wiphy *wiphy,
78 enum nl80211_iftype type, u32 *flags, 78 enum nl80211_iftype type, u32 *flags,
79 struct vif_params *params) 79 struct vif_params *params)
80{ 80{
81 struct ieee80211_sub_if_data *sdata; 81 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
82 int ret; 82 int ret;
83 83
84 if (netif_running(dev)) 84 if (ieee80211_sdata_running(sdata))
85 return -EBUSY; 85 return -EBUSY;
86 86
87 if (!nl80211_params_check(type, params)) 87 if (!nl80211_params_check(type, params))
88 return -EINVAL; 88 return -EINVAL;
89 89
90 sdata = IEEE80211_DEV_TO_SUB_IF(dev);
91
92 ret = ieee80211_if_change_type(sdata, type); 90 ret = ieee80211_if_change_type(sdata, type);
93 if (ret) 91 if (ret)
94 return ret; 92 return ret;
@@ -150,7 +148,7 @@ static int ieee80211_add_key(struct wiphy *wiphy, struct net_device *dev,
150 rcu_read_lock(); 148 rcu_read_lock();
151 149
152 if (mac_addr) { 150 if (mac_addr) {
153 sta = sta_info_get(sdata->local, mac_addr); 151 sta = sta_info_get_bss(sdata, mac_addr);
154 if (!sta) { 152 if (!sta) {
155 ieee80211_key_free(key); 153 ieee80211_key_free(key);
156 err = -ENOENT; 154 err = -ENOENT;
@@ -181,7 +179,7 @@ static int ieee80211_del_key(struct wiphy *wiphy, struct net_device *dev,
181 if (mac_addr) { 179 if (mac_addr) {
182 ret = -ENOENT; 180 ret = -ENOENT;
183 181
184 sta = sta_info_get(sdata->local, mac_addr); 182 sta = sta_info_get_bss(sdata, mac_addr);
185 if (!sta) 183 if (!sta)
186 goto out_unlock; 184 goto out_unlock;
187 185
@@ -228,7 +226,7 @@ static int ieee80211_get_key(struct wiphy *wiphy, struct net_device *dev,
228 rcu_read_lock(); 226 rcu_read_lock();
229 227
230 if (mac_addr) { 228 if (mac_addr) {
231 sta = sta_info_get(sdata->local, mac_addr); 229 sta = sta_info_get_bss(sdata, mac_addr);
232 if (!sta) 230 if (!sta)
233 goto out; 231 goto out;
234 232
@@ -415,15 +413,13 @@ static int ieee80211_dump_station(struct wiphy *wiphy, struct net_device *dev,
415static int ieee80211_get_station(struct wiphy *wiphy, struct net_device *dev, 413static int ieee80211_get_station(struct wiphy *wiphy, struct net_device *dev,
416 u8 *mac, struct station_info *sinfo) 414 u8 *mac, struct station_info *sinfo)
417{ 415{
418 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); 416 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
419 struct sta_info *sta; 417 struct sta_info *sta;
420 int ret = -ENOENT; 418 int ret = -ENOENT;
421 419
422 rcu_read_lock(); 420 rcu_read_lock();
423 421
424 /* XXX: verify sta->dev == dev */ 422 sta = sta_info_get_bss(sdata, mac);
425
426 sta = sta_info_get(local, mac);
427 if (sta) { 423 if (sta) {
428 ret = 0; 424 ret = 0;
429 sta_set_sinfo(sta, sinfo); 425 sta_set_sinfo(sta, sinfo);
@@ -732,7 +728,7 @@ static int ieee80211_add_station(struct wiphy *wiphy, struct net_device *dev,
732 } else 728 } else
733 sdata = IEEE80211_DEV_TO_SUB_IF(dev); 729 sdata = IEEE80211_DEV_TO_SUB_IF(dev);
734 730
735 if (compare_ether_addr(mac, dev->dev_addr) == 0) 731 if (compare_ether_addr(mac, sdata->vif.addr) == 0)
736 return -EINVAL; 732 return -EINVAL;
737 733
738 if (is_multicast_ether_addr(mac)) 734 if (is_multicast_ether_addr(mac))
@@ -779,8 +775,7 @@ static int ieee80211_del_station(struct wiphy *wiphy, struct net_device *dev,
779 if (mac) { 775 if (mac) {
780 rcu_read_lock(); 776 rcu_read_lock();
781 777
782 /* XXX: get sta belonging to dev */ 778 sta = sta_info_get_bss(sdata, mac);
783 sta = sta_info_get(local, mac);
784 if (!sta) { 779 if (!sta) {
785 rcu_read_unlock(); 780 rcu_read_unlock();
786 return -ENOENT; 781 return -ENOENT;
@@ -801,14 +796,14 @@ static int ieee80211_change_station(struct wiphy *wiphy,
801 u8 *mac, 796 u8 *mac,
802 struct station_parameters *params) 797 struct station_parameters *params)
803{ 798{
799 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
804 struct ieee80211_local *local = wiphy_priv(wiphy); 800 struct ieee80211_local *local = wiphy_priv(wiphy);
805 struct sta_info *sta; 801 struct sta_info *sta;
806 struct ieee80211_sub_if_data *vlansdata; 802 struct ieee80211_sub_if_data *vlansdata;
807 803
808 rcu_read_lock(); 804 rcu_read_lock();
809 805
810 /* XXX: get sta belonging to dev */ 806 sta = sta_info_get_bss(sdata, mac);
811 sta = sta_info_get(local, mac);
812 if (!sta) { 807 if (!sta) {
813 rcu_read_unlock(); 808 rcu_read_unlock();
814 return -ENOENT; 809 return -ENOENT;
@@ -847,7 +842,6 @@ static int ieee80211_change_station(struct wiphy *wiphy,
847static int ieee80211_add_mpath(struct wiphy *wiphy, struct net_device *dev, 842static int ieee80211_add_mpath(struct wiphy *wiphy, struct net_device *dev,
848 u8 *dst, u8 *next_hop) 843 u8 *dst, u8 *next_hop)
849{ 844{
850 struct ieee80211_local *local = wiphy_priv(wiphy);
851 struct ieee80211_sub_if_data *sdata; 845 struct ieee80211_sub_if_data *sdata;
852 struct mesh_path *mpath; 846 struct mesh_path *mpath;
853 struct sta_info *sta; 847 struct sta_info *sta;
@@ -856,7 +850,7 @@ static int ieee80211_add_mpath(struct wiphy *wiphy, struct net_device *dev,
856 sdata = IEEE80211_DEV_TO_SUB_IF(dev); 850 sdata = IEEE80211_DEV_TO_SUB_IF(dev);
857 851
858 rcu_read_lock(); 852 rcu_read_lock();
859 sta = sta_info_get(local, next_hop); 853 sta = sta_info_get(sdata, next_hop);
860 if (!sta) { 854 if (!sta) {
861 rcu_read_unlock(); 855 rcu_read_unlock();
862 return -ENOENT; 856 return -ENOENT;
@@ -895,7 +889,6 @@ static int ieee80211_change_mpath(struct wiphy *wiphy,
895 struct net_device *dev, 889 struct net_device *dev,
896 u8 *dst, u8 *next_hop) 890 u8 *dst, u8 *next_hop)
897{ 891{
898 struct ieee80211_local *local = wiphy_priv(wiphy);
899 struct ieee80211_sub_if_data *sdata; 892 struct ieee80211_sub_if_data *sdata;
900 struct mesh_path *mpath; 893 struct mesh_path *mpath;
901 struct sta_info *sta; 894 struct sta_info *sta;
@@ -904,7 +897,7 @@ static int ieee80211_change_mpath(struct wiphy *wiphy,
904 897
905 rcu_read_lock(); 898 rcu_read_lock();
906 899
907 sta = sta_info_get(local, next_hop); 900 sta = sta_info_get(sdata, next_hop);
908 if (!sta) { 901 if (!sta) {
909 rcu_read_unlock(); 902 rcu_read_unlock();
910 return -ENOENT; 903 return -ENOENT;
@@ -1092,6 +1085,13 @@ static int ieee80211_change_bss(struct wiphy *wiphy,
1092 params->use_short_preamble; 1085 params->use_short_preamble;
1093 changed |= BSS_CHANGED_ERP_PREAMBLE; 1086 changed |= BSS_CHANGED_ERP_PREAMBLE;
1094 } 1087 }
1088
1089 if (!sdata->vif.bss_conf.use_short_slot &&
1090 sdata->local->hw.conf.channel->band == IEEE80211_BAND_5GHZ) {
1091 sdata->vif.bss_conf.use_short_slot = true;
1092 changed |= BSS_CHANGED_ERP_SLOT;
1093 }
1094
1095 if (params->use_short_slot_time >= 0) { 1095 if (params->use_short_slot_time >= 0) {
1096 sdata->vif.bss_conf.use_short_slot = 1096 sdata->vif.bss_conf.use_short_slot =
1097 params->use_short_slot_time; 1097 params->use_short_slot_time;
@@ -1135,6 +1135,13 @@ static int ieee80211_set_txq_params(struct wiphy *wiphy,
1135 p.cw_max = params->cwmax; 1135 p.cw_max = params->cwmax;
1136 p.cw_min = params->cwmin; 1136 p.cw_min = params->cwmin;
1137 p.txop = params->txop; 1137 p.txop = params->txop;
1138
1139 /*
1140 * Setting tx queue params disables u-apsd because it's only
1141 * called in master mode.
1142 */
1143 p.uapsd = false;
1144
1138 if (drv_conf_tx(local, params->queue, &p)) { 1145 if (drv_conf_tx(local, params->queue, &p)) {
1139 printk(KERN_DEBUG "%s: failed to set TX queue " 1146 printk(KERN_DEBUG "%s: failed to set TX queue "
1140 "parameters for queue %d\n", 1147 "parameters for queue %d\n",
@@ -1237,6 +1244,13 @@ static int ieee80211_set_wiphy_params(struct wiphy *wiphy, u32 changed)
1237 struct ieee80211_local *local = wiphy_priv(wiphy); 1244 struct ieee80211_local *local = wiphy_priv(wiphy);
1238 int err; 1245 int err;
1239 1246
1247 if (changed & WIPHY_PARAM_COVERAGE_CLASS) {
1248 err = drv_set_coverage_class(local, wiphy->coverage_class);
1249
1250 if (err)
1251 return err;
1252 }
1253
1240 if (changed & WIPHY_PARAM_RTS_THRESHOLD) { 1254 if (changed & WIPHY_PARAM_RTS_THRESHOLD) {
1241 err = drv_set_rts_threshold(local, wiphy->rts_threshold); 1255 err = drv_set_rts_threshold(local, wiphy->rts_threshold);
1242 1256
@@ -1324,6 +1338,50 @@ static int ieee80211_testmode_cmd(struct wiphy *wiphy, void *data, int len)
1324} 1338}
1325#endif 1339#endif
1326 1340
1341int __ieee80211_request_smps(struct ieee80211_sub_if_data *sdata,
1342 enum ieee80211_smps_mode smps_mode)
1343{
1344 const u8 *ap;
1345 enum ieee80211_smps_mode old_req;
1346 int err;
1347
1348 old_req = sdata->u.mgd.req_smps;
1349 sdata->u.mgd.req_smps = smps_mode;
1350
1351 if (old_req == smps_mode &&
1352 smps_mode != IEEE80211_SMPS_AUTOMATIC)
1353 return 0;
1354
1355 /*
1356 * If not associated, or current association is not an HT
1357 * association, there's no need to send an action frame.
1358 */
1359 if (!sdata->u.mgd.associated ||
1360 sdata->local->oper_channel_type == NL80211_CHAN_NO_HT) {
1361 mutex_lock(&sdata->local->iflist_mtx);
1362 ieee80211_recalc_smps(sdata->local, sdata);
1363 mutex_unlock(&sdata->local->iflist_mtx);
1364 return 0;
1365 }
1366
1367 ap = sdata->u.mgd.associated->bssid;
1368
1369 if (smps_mode == IEEE80211_SMPS_AUTOMATIC) {
1370 if (sdata->u.mgd.powersave)
1371 smps_mode = IEEE80211_SMPS_DYNAMIC;
1372 else
1373 smps_mode = IEEE80211_SMPS_OFF;
1374 }
1375
1376 /* send SM PS frame to AP */
1377 err = ieee80211_send_smps_action(sdata, smps_mode,
1378 ap, ap);
1379 if (err)
1380 sdata->u.mgd.req_smps = old_req;
1381
1382 return err;
1383}
1384
1327static int ieee80211_set_power_mgmt(struct wiphy *wiphy, struct net_device *dev, 1385static int ieee80211_set_power_mgmt(struct wiphy *wiphy, struct net_device *dev,
1328 bool enabled, int timeout) 1386 bool enabled, int timeout)
1329{ 1387{
@@ -1344,6 +1402,11 @@ static int ieee80211_set_power_mgmt(struct wiphy *wiphy, struct net_device *dev,
1344 sdata->u.mgd.powersave = enabled; 1402 sdata->u.mgd.powersave = enabled;
1345 conf->dynamic_ps_timeout = timeout; 1403 conf->dynamic_ps_timeout = timeout;
1346 1404
1405 /* no change, but if automatic follow powersave */
1406 mutex_lock(&sdata->u.mgd.mtx);
1407 __ieee80211_request_smps(sdata, sdata->u.mgd.req_smps);
1408 mutex_unlock(&sdata->u.mgd.mtx);
1409
1347 if (local->hw.flags & IEEE80211_HW_SUPPORTS_DYNAMIC_PS) 1410 if (local->hw.flags & IEEE80211_HW_SUPPORTS_DYNAMIC_PS)
1348 ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_PS); 1411 ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_PS);
1349 1412
@@ -1359,39 +1422,43 @@ static int ieee80211_set_bitrate_mask(struct wiphy *wiphy,
1359{ 1422{
1360 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); 1423 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
1361 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); 1424 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
1362 int i, err = -EINVAL; 1425 int i;
1363 u32 target_rate;
1364 struct ieee80211_supported_band *sband;
1365 1426
1366 sband = local->hw.wiphy->bands[local->hw.conf.channel->band]; 1427 /*
1428 * This _could_ be supported by providing a hook for
1429 * drivers for this function, but at this point it
1430 * doesn't seem worth bothering.
1431 */
1432 if (local->hw.flags & IEEE80211_HW_HAS_RATE_CONTROL)
1433 return -EOPNOTSUPP;
1367 1434
1368 /* target_rate = -1, rate->fixed = 0 means auto only, so use all rates
1369 * target_rate = X, rate->fixed = 1 means only rate X
1370 * target_rate = X, rate->fixed = 0 means all rates <= X */
1371 sdata->max_ratectrl_rateidx = -1;
1372 sdata->force_unicast_rateidx = -1;
1373 1435
1374 if (mask->fixed) 1436 for (i = 0; i < IEEE80211_NUM_BANDS; i++)
1375 target_rate = mask->fixed / 100; 1437 sdata->rc_rateidx_mask[i] = mask->control[i].legacy;
1376 else if (mask->maxrate)
1377 target_rate = mask->maxrate / 100;
1378 else
1379 return 0;
1380 1438
1381 for (i=0; i< sband->n_bitrates; i++) { 1439 return 0;
1382 struct ieee80211_rate *brate = &sband->bitrates[i]; 1440}
1383 int this_rate = brate->bitrate;
1384 1441
1385 if (target_rate == this_rate) { 1442static int ieee80211_remain_on_channel(struct wiphy *wiphy,
1386 sdata->max_ratectrl_rateidx = i; 1443 struct net_device *dev,
1387 if (mask->fixed) 1444 struct ieee80211_channel *chan,
1388 sdata->force_unicast_rateidx = i; 1445 enum nl80211_channel_type channel_type,
1389 err = 0; 1446 unsigned int duration,
1390 break; 1447 u64 *cookie)
1391 } 1448{
1392 } 1449 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
1393 1450
1394 return err; 1451 return ieee80211_wk_remain_on_channel(sdata, chan, channel_type,
1452 duration, cookie);
1453}
1454
1455static int ieee80211_cancel_remain_on_channel(struct wiphy *wiphy,
1456 struct net_device *dev,
1457 u64 cookie)
1458{
1459 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
1460
1461 return ieee80211_wk_cancel_remain_on_channel(sdata, cookie);
1395} 1462}
1396 1463
1397struct cfg80211_ops mac80211_config_ops = { 1464struct cfg80211_ops mac80211_config_ops = {
@@ -1440,4 +1507,6 @@ struct cfg80211_ops mac80211_config_ops = {
1440 CFG80211_TESTMODE_CMD(ieee80211_testmode_cmd) 1507 CFG80211_TESTMODE_CMD(ieee80211_testmode_cmd)
1441 .set_power_mgmt = ieee80211_set_power_mgmt, 1508 .set_power_mgmt = ieee80211_set_power_mgmt,
1442 .set_bitrate_mask = ieee80211_set_bitrate_mask, 1509 .set_bitrate_mask = ieee80211_set_bitrate_mask,
1510 .remain_on_channel = ieee80211_remain_on_channel,
1511 .cancel_remain_on_channel = ieee80211_cancel_remain_on_channel,
1443}; 1512};
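
A standalone sketch (not part of the patch) of the data model behind the reworked ieee80211_set_bitrate_mask() above: instead of translating a single target rate, it now stores one legacy-rate bitmap per band and rejects the request when the hardware does its own rate control. The types, mask values, and helper below are illustrative only.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

enum band { BAND_2GHZ, BAND_5GHZ, NUM_BANDS };

struct sub_if { uint32_t rc_rateidx_mask[NUM_BANDS]; };

static void set_bitrate_mask(struct sub_if *sdata,
                             const uint32_t legacy[NUM_BANDS])
{
        /* mirror of the per-band copy loop in the hunk above */
        for (int i = 0; i < NUM_BANDS; i++)
                sdata->rc_rateidx_mask[i] = legacy[i];
}

static bool rate_allowed(const struct sub_if *sdata, enum band b, int rateidx)
{
        return sdata->rc_rateidx_mask[b] & (1u << rateidx);
}

int main(void)
{
        struct sub_if sdata;
        const uint32_t legacy[NUM_BANDS] = { 0x0ff0, 0x00ff }; /* arbitrary */

        set_bitrate_mask(&sdata, legacy);
        printf("2 GHz rate 3 allowed: %d\n", rate_allowed(&sdata, BAND_2GHZ, 3));
        printf("5 GHz rate 3 allowed: %d\n", rate_allowed(&sdata, BAND_5GHZ, 3));
        return 0;
}
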
diff --git a/net/mac80211/debugfs.c b/net/mac80211/debugfs.c
index e4b54093d41..b3bc32b62a5 100644
--- a/net/mac80211/debugfs.c
+++ b/net/mac80211/debugfs.c
@@ -158,6 +158,98 @@ static const struct file_operations noack_ops = {
158 .open = mac80211_open_file_generic 158 .open = mac80211_open_file_generic
159}; 159};
160 160
161static ssize_t uapsd_queues_read(struct file *file, char __user *user_buf,
162 size_t count, loff_t *ppos)
163{
164 struct ieee80211_local *local = file->private_data;
165 int res;
166 char buf[10];
167
168 res = scnprintf(buf, sizeof(buf), "0x%x\n", local->uapsd_queues);
169
170 return simple_read_from_buffer(user_buf, count, ppos, buf, res);
171}
172
173static ssize_t uapsd_queues_write(struct file *file,
174 const char __user *user_buf,
175 size_t count, loff_t *ppos)
176{
177 struct ieee80211_local *local = file->private_data;
178 unsigned long val;
179 char buf[10];
180 size_t len;
181 int ret;
182
183 len = min(count, sizeof(buf) - 1);
184 if (copy_from_user(buf, user_buf, len))
185 return -EFAULT;
186 buf[len] = '\0';
187
188 ret = strict_strtoul(buf, 0, &val);
189
190 if (ret)
191 return -EINVAL;
192
193 if (val & ~IEEE80211_WMM_IE_STA_QOSINFO_AC_MASK)
194 return -ERANGE;
195
196 local->uapsd_queues = val;
197
198 return count;
199}
200
201static const struct file_operations uapsd_queues_ops = {
202 .read = uapsd_queues_read,
203 .write = uapsd_queues_write,
204 .open = mac80211_open_file_generic
205};
206
207static ssize_t uapsd_max_sp_len_read(struct file *file, char __user *user_buf,
208 size_t count, loff_t *ppos)
209{
210 struct ieee80211_local *local = file->private_data;
211 int res;
212 char buf[10];
213
214 res = scnprintf(buf, sizeof(buf), "0x%x\n", local->uapsd_max_sp_len);
215
216 return simple_read_from_buffer(user_buf, count, ppos, buf, res);
217}
218
219static ssize_t uapsd_max_sp_len_write(struct file *file,
220 const char __user *user_buf,
221 size_t count, loff_t *ppos)
222{
223 struct ieee80211_local *local = file->private_data;
224 unsigned long val;
225 char buf[10];
226 size_t len;
227 int ret;
228
229 len = min(count, sizeof(buf) - 1);
230 if (copy_from_user(buf, user_buf, len))
231 return -EFAULT;
232 buf[len] = '\0';
233
234 ret = strict_strtoul(buf, 0, &val);
235
236 if (ret)
237 return -EINVAL;
238
239 if (val & ~IEEE80211_WMM_IE_STA_QOSINFO_SP_MASK)
240 return -ERANGE;
241
242 local->uapsd_max_sp_len = val;
243
244 return count;
245}
246
247static const struct file_operations uapsd_max_sp_len_ops = {
248 .read = uapsd_max_sp_len_read,
249 .write = uapsd_max_sp_len_write,
250 .open = mac80211_open_file_generic
251};
252
161static ssize_t queues_read(struct file *file, char __user *user_buf, 253static ssize_t queues_read(struct file *file, char __user *user_buf,
162 size_t count, loff_t *ppos) 254 size_t count, loff_t *ppos)
163{ 255{
@@ -314,6 +406,8 @@ void debugfs_hw_add(struct ieee80211_local *local)
314 DEBUGFS_ADD(queues); 406 DEBUGFS_ADD(queues);
315 DEBUGFS_ADD_MODE(reset, 0200); 407 DEBUGFS_ADD_MODE(reset, 0200);
316 DEBUGFS_ADD(noack); 408 DEBUGFS_ADD(noack);
409 DEBUGFS_ADD(uapsd_queues);
410 DEBUGFS_ADD(uapsd_max_sp_len);
317 411
318 statsd = debugfs_create_dir("statistics", phyd); 412 statsd = debugfs_create_dir("statistics", phyd);
319 413
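
The uapsd_queues and uapsd_max_sp_len write handlers added above share one parse-and-validate pattern: bounded copy from userspace, NUL-terminate, parse as an integer, reject bits outside an allowed mask. A self-contained userspace sketch of that pattern follows (not part of the patch); the mask value and function name are illustrative.

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define QOSINFO_AC_MASK 0x0f   /* stand-in for the real AC bitmask */

static int parse_masked_hex(const char *user_buf, size_t count,
                            unsigned long mask, unsigned long *out)
{
        char buf[10], *end;
        size_t len = count < sizeof(buf) - 1 ? count : sizeof(buf) - 1;
        unsigned long val;

        memcpy(buf, user_buf, len);     /* bounded copy, like copy_from_user */
        buf[len] = '\0';

        errno = 0;
        val = strtoul(buf, &end, 0);    /* strict_strtoul equivalent */
        if (errno || end == buf)
                return -EINVAL;
        if (val & ~mask)                /* bits outside the allowed mask */
                return -ERANGE;

        *out = val;
        return 0;
}

int main(void)
{
        unsigned long val;
        const char *input = "0x5\n";

        if (parse_masked_hex(input, strlen(input), QOSINFO_AC_MASK, &val) == 0)
                printf("accepted 0x%lx\n", val);
        return 0;
}
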
diff --git a/net/mac80211/debugfs_key.c b/net/mac80211/debugfs_key.c
index e0f5224630d..d12e743cb4e 100644
--- a/net/mac80211/debugfs_key.c
+++ b/net/mac80211/debugfs_key.c
@@ -56,7 +56,7 @@ KEY_CONF_FILE(keyidx, D);
56KEY_CONF_FILE(hw_key_idx, D); 56KEY_CONF_FILE(hw_key_idx, D);
57KEY_FILE(flags, X); 57KEY_FILE(flags, X);
58KEY_FILE(tx_rx_count, D); 58KEY_FILE(tx_rx_count, D);
59KEY_READ(ifindex, sdata->dev->ifindex, 20, "%d\n"); 59KEY_READ(ifindex, sdata->name, IFNAMSIZ + 2, "%s\n");
60KEY_OPS(ifindex); 60KEY_OPS(ifindex);
61 61
62static ssize_t key_algorithm_read(struct file *file, 62static ssize_t key_algorithm_read(struct file *file,
diff --git a/net/mac80211/debugfs_netdev.c b/net/mac80211/debugfs_netdev.c
index 472b2039906..9affe2cd185 100644
--- a/net/mac80211/debugfs_netdev.c
+++ b/net/mac80211/debugfs_netdev.c
@@ -41,6 +41,30 @@ static ssize_t ieee80211_if_read(
41 return ret; 41 return ret;
42} 42}
43 43
44static ssize_t ieee80211_if_write(
45 struct ieee80211_sub_if_data *sdata,
46 const char __user *userbuf,
47 size_t count, loff_t *ppos,
48 ssize_t (*write)(struct ieee80211_sub_if_data *, const char *, int))
49{
50 u8 *buf;
51 ssize_t ret = -ENODEV;
52
53 buf = kzalloc(count, GFP_KERNEL);
54 if (!buf)
55 return -ENOMEM;
56
57 if (copy_from_user(buf, userbuf, count))
58 return -EFAULT;
59
60 rtnl_lock();
61 if (sdata->dev->reg_state == NETREG_REGISTERED)
62 ret = (*write)(sdata, buf, count);
63 rtnl_unlock();
64
65 return ret;
66}
67
44#define IEEE80211_IF_FMT(name, field, format_string) \ 68#define IEEE80211_IF_FMT(name, field, format_string) \
45static ssize_t ieee80211_if_fmt_##name( \ 69static ssize_t ieee80211_if_fmt_##name( \
46 const struct ieee80211_sub_if_data *sdata, char *buf, \ 70 const struct ieee80211_sub_if_data *sdata, char *buf, \
@@ -71,7 +95,7 @@ static ssize_t ieee80211_if_fmt_##name( \
71 return scnprintf(buf, buflen, "%pM\n", sdata->field); \ 95 return scnprintf(buf, buflen, "%pM\n", sdata->field); \
72} 96}
73 97
74#define __IEEE80211_IF_FILE(name) \ 98#define __IEEE80211_IF_FILE(name, _write) \
75static ssize_t ieee80211_if_read_##name(struct file *file, \ 99static ssize_t ieee80211_if_read_##name(struct file *file, \
76 char __user *userbuf, \ 100 char __user *userbuf, \
77 size_t count, loff_t *ppos) \ 101 size_t count, loff_t *ppos) \
@@ -82,22 +106,99 @@ static ssize_t ieee80211_if_read_##name(struct file *file, \
82} \ 106} \
83static const struct file_operations name##_ops = { \ 107static const struct file_operations name##_ops = { \
84 .read = ieee80211_if_read_##name, \ 108 .read = ieee80211_if_read_##name, \
109 .write = (_write), \
85 .open = mac80211_open_file_generic, \ 110 .open = mac80211_open_file_generic, \
86} 111}
87 112
113#define __IEEE80211_IF_FILE_W(name) \
114static ssize_t ieee80211_if_write_##name(struct file *file, \
115 const char __user *userbuf, \
116 size_t count, loff_t *ppos) \
117{ \
118 return ieee80211_if_write(file->private_data, userbuf, count, \
119 ppos, ieee80211_if_parse_##name); \
120} \
121__IEEE80211_IF_FILE(name, ieee80211_if_write_##name)
122
123
88#define IEEE80211_IF_FILE(name, field, format) \ 124#define IEEE80211_IF_FILE(name, field, format) \
89 IEEE80211_IF_FMT_##format(name, field) \ 125 IEEE80211_IF_FMT_##format(name, field) \
90 __IEEE80211_IF_FILE(name) 126 __IEEE80211_IF_FILE(name, NULL)
91 127
92/* common attributes */ 128/* common attributes */
93IEEE80211_IF_FILE(drop_unencrypted, drop_unencrypted, DEC); 129IEEE80211_IF_FILE(drop_unencrypted, drop_unencrypted, DEC);
94IEEE80211_IF_FILE(force_unicast_rateidx, force_unicast_rateidx, DEC); 130IEEE80211_IF_FILE(rc_rateidx_mask_2ghz, rc_rateidx_mask[IEEE80211_BAND_2GHZ],
95IEEE80211_IF_FILE(max_ratectrl_rateidx, max_ratectrl_rateidx, DEC); 131 HEX);
132IEEE80211_IF_FILE(rc_rateidx_mask_5ghz, rc_rateidx_mask[IEEE80211_BAND_5GHZ],
133 HEX);
96 134
97/* STA attributes */ 135/* STA attributes */
98IEEE80211_IF_FILE(bssid, u.mgd.bssid, MAC); 136IEEE80211_IF_FILE(bssid, u.mgd.bssid, MAC);
99IEEE80211_IF_FILE(aid, u.mgd.aid, DEC); 137IEEE80211_IF_FILE(aid, u.mgd.aid, DEC);
100IEEE80211_IF_FILE(capab, u.mgd.capab, HEX); 138
139static int ieee80211_set_smps(struct ieee80211_sub_if_data *sdata,
140 enum ieee80211_smps_mode smps_mode)
141{
142 struct ieee80211_local *local = sdata->local;
143 int err;
144
145 if (!(local->hw.flags & IEEE80211_HW_SUPPORTS_STATIC_SMPS) &&
146 smps_mode == IEEE80211_SMPS_STATIC)
147 return -EINVAL;
148
149 /* auto should be dynamic if in PS mode */
150 if (!(local->hw.flags & IEEE80211_HW_SUPPORTS_DYNAMIC_SMPS) &&
151 (smps_mode == IEEE80211_SMPS_DYNAMIC ||
152 smps_mode == IEEE80211_SMPS_AUTOMATIC))
153 return -EINVAL;
154
155 /* supported only on managed interfaces for now */
156 if (sdata->vif.type != NL80211_IFTYPE_STATION)
157 return -EOPNOTSUPP;
158
159 mutex_lock(&local->iflist_mtx);
160 err = __ieee80211_request_smps(sdata, smps_mode);
161 mutex_unlock(&local->iflist_mtx);
162
163 return err;
164}
165
166static const char *smps_modes[IEEE80211_SMPS_NUM_MODES] = {
167 [IEEE80211_SMPS_AUTOMATIC] = "auto",
168 [IEEE80211_SMPS_OFF] = "off",
169 [IEEE80211_SMPS_STATIC] = "static",
170 [IEEE80211_SMPS_DYNAMIC] = "dynamic",
171};
172
173static ssize_t ieee80211_if_fmt_smps(const struct ieee80211_sub_if_data *sdata,
174 char *buf, int buflen)
175{
176 if (sdata->vif.type != NL80211_IFTYPE_STATION)
177 return -EOPNOTSUPP;
178
179 return snprintf(buf, buflen, "request: %s\nused: %s\n",
180 smps_modes[sdata->u.mgd.req_smps],
181 smps_modes[sdata->u.mgd.ap_smps]);
182}
183
184static ssize_t ieee80211_if_parse_smps(struct ieee80211_sub_if_data *sdata,
185 const char *buf, int buflen)
186{
187 enum ieee80211_smps_mode mode;
188
189 for (mode = 0; mode < IEEE80211_SMPS_NUM_MODES; mode++) {
190 if (strncmp(buf, smps_modes[mode], buflen) == 0) {
191 int err = ieee80211_set_smps(sdata, mode);
192 if (!err)
193 return buflen;
194 return err;
195 }
196 }
197
198 return -EINVAL;
199}
200
201__IEEE80211_IF_FILE_W(smps);
101 202
102/* AP attributes */ 203/* AP attributes */
103IEEE80211_IF_FILE(num_sta_ps, u.ap.num_sta_ps, ATOMIC); 204IEEE80211_IF_FILE(num_sta_ps, u.ap.num_sta_ps, ATOMIC);
@@ -109,7 +210,7 @@ static ssize_t ieee80211_if_fmt_num_buffered_multicast(
109 return scnprintf(buf, buflen, "%u\n", 210 return scnprintf(buf, buflen, "%u\n",
110 skb_queue_len(&sdata->u.ap.ps_bc_buf)); 211 skb_queue_len(&sdata->u.ap.ps_bc_buf));
111} 212}
112__IEEE80211_IF_FILE(num_buffered_multicast); 213__IEEE80211_IF_FILE(num_buffered_multicast, NULL);
113 214
114/* WDS attributes */ 215/* WDS attributes */
115IEEE80211_IF_FILE(peer, u.wds.remote_addr, MAC); 216IEEE80211_IF_FILE(peer, u.wds.remote_addr, MAC);
@@ -154,46 +255,50 @@ IEEE80211_IF_FILE(dot11MeshHWMPRootMode,
154#endif 255#endif
155 256
156 257
157#define DEBUGFS_ADD(name, type) \ 258#define DEBUGFS_ADD(name) \
158 debugfs_create_file(#name, 0400, sdata->debugfs.dir, \ 259 debugfs_create_file(#name, 0400, sdata->debugfs.dir, \
159 sdata, &name##_ops); 260 sdata, &name##_ops);
160 261
262#define DEBUGFS_ADD_MODE(name, mode) \
263 debugfs_create_file(#name, mode, sdata->debugfs.dir, \
264 sdata, &name##_ops);
265
161static void add_sta_files(struct ieee80211_sub_if_data *sdata) 266static void add_sta_files(struct ieee80211_sub_if_data *sdata)
162{ 267{
163 DEBUGFS_ADD(drop_unencrypted, sta); 268 DEBUGFS_ADD(drop_unencrypted);
164 DEBUGFS_ADD(force_unicast_rateidx, sta); 269 DEBUGFS_ADD(rc_rateidx_mask_2ghz);
165 DEBUGFS_ADD(max_ratectrl_rateidx, sta); 270 DEBUGFS_ADD(rc_rateidx_mask_5ghz);
166 271
167 DEBUGFS_ADD(bssid, sta); 272 DEBUGFS_ADD(bssid);
168 DEBUGFS_ADD(aid, sta); 273 DEBUGFS_ADD(aid);
169 DEBUGFS_ADD(capab, sta); 274 DEBUGFS_ADD_MODE(smps, 0600);
170} 275}
171 276
172static void add_ap_files(struct ieee80211_sub_if_data *sdata) 277static void add_ap_files(struct ieee80211_sub_if_data *sdata)
173{ 278{
174 DEBUGFS_ADD(drop_unencrypted, ap); 279 DEBUGFS_ADD(drop_unencrypted);
175 DEBUGFS_ADD(force_unicast_rateidx, ap); 280 DEBUGFS_ADD(rc_rateidx_mask_2ghz);
176 DEBUGFS_ADD(max_ratectrl_rateidx, ap); 281 DEBUGFS_ADD(rc_rateidx_mask_5ghz);
177 282
178 DEBUGFS_ADD(num_sta_ps, ap); 283 DEBUGFS_ADD(num_sta_ps);
179 DEBUGFS_ADD(dtim_count, ap); 284 DEBUGFS_ADD(dtim_count);
180 DEBUGFS_ADD(num_buffered_multicast, ap); 285 DEBUGFS_ADD(num_buffered_multicast);
181} 286}
182 287
183static void add_wds_files(struct ieee80211_sub_if_data *sdata) 288static void add_wds_files(struct ieee80211_sub_if_data *sdata)
184{ 289{
185 DEBUGFS_ADD(drop_unencrypted, wds); 290 DEBUGFS_ADD(drop_unencrypted);
186 DEBUGFS_ADD(force_unicast_rateidx, wds); 291 DEBUGFS_ADD(rc_rateidx_mask_2ghz);
187 DEBUGFS_ADD(max_ratectrl_rateidx, wds); 292 DEBUGFS_ADD(rc_rateidx_mask_5ghz);
188 293
189 DEBUGFS_ADD(peer, wds); 294 DEBUGFS_ADD(peer);
190} 295}
191 296
192static void add_vlan_files(struct ieee80211_sub_if_data *sdata) 297static void add_vlan_files(struct ieee80211_sub_if_data *sdata)
193{ 298{
194 DEBUGFS_ADD(drop_unencrypted, vlan); 299 DEBUGFS_ADD(drop_unencrypted);
195 DEBUGFS_ADD(force_unicast_rateidx, vlan); 300 DEBUGFS_ADD(rc_rateidx_mask_2ghz);
196 DEBUGFS_ADD(max_ratectrl_rateidx, vlan); 301 DEBUGFS_ADD(rc_rateidx_mask_5ghz);
197} 302}
198 303
199static void add_monitor_files(struct ieee80211_sub_if_data *sdata) 304static void add_monitor_files(struct ieee80211_sub_if_data *sdata)
@@ -280,16 +385,11 @@ static void add_files(struct ieee80211_sub_if_data *sdata)
280 } 385 }
281} 386}
282 387
283static int notif_registered;
284
285void ieee80211_debugfs_add_netdev(struct ieee80211_sub_if_data *sdata) 388void ieee80211_debugfs_add_netdev(struct ieee80211_sub_if_data *sdata)
286{ 389{
287 char buf[10+IFNAMSIZ]; 390 char buf[10+IFNAMSIZ];
288 391
289 if (!notif_registered) 392 sprintf(buf, "netdev:%s", sdata->name);
290 return;
291
292 sprintf(buf, "netdev:%s", sdata->dev->name);
293 sdata->debugfs.dir = debugfs_create_dir(buf, 393 sdata->debugfs.dir = debugfs_create_dir(buf,
294 sdata->local->hw.wiphy->debugfsdir); 394 sdata->local->hw.wiphy->debugfsdir);
295 add_files(sdata); 395 add_files(sdata);
@@ -304,58 +404,18 @@ void ieee80211_debugfs_remove_netdev(struct ieee80211_sub_if_data *sdata)
304 sdata->debugfs.dir = NULL; 404 sdata->debugfs.dir = NULL;
305} 405}
306 406
307static int netdev_notify(struct notifier_block *nb, 407void ieee80211_debugfs_rename_netdev(struct ieee80211_sub_if_data *sdata)
308 unsigned long state,
309 void *ndev)
310{ 408{
311 struct net_device *dev = ndev;
312 struct dentry *dir; 409 struct dentry *dir;
313 struct ieee80211_sub_if_data *sdata; 410 char buf[10 + IFNAMSIZ];
314 char buf[10+IFNAMSIZ];
315
316 if (state != NETDEV_CHANGENAME)
317 return 0;
318
319 if (!dev->ieee80211_ptr || !dev->ieee80211_ptr->wiphy)
320 return 0;
321
322 if (dev->ieee80211_ptr->wiphy->privid != mac80211_wiphy_privid)
323 return 0;
324
325 sdata = IEEE80211_DEV_TO_SUB_IF(dev);
326 411
327 dir = sdata->debugfs.dir; 412 dir = sdata->debugfs.dir;
328 413
329 if (!dir) 414 if (!dir)
330 return 0; 415 return;
331 416
332 sprintf(buf, "netdev:%s", dev->name); 417 sprintf(buf, "netdev:%s", sdata->name);
333 if (!debugfs_rename(dir->d_parent, dir, dir->d_parent, buf)) 418 if (!debugfs_rename(dir->d_parent, dir, dir->d_parent, buf))
334 printk(KERN_ERR "mac80211: debugfs: failed to rename debugfs " 419 printk(KERN_ERR "mac80211: debugfs: failed to rename debugfs "
335 "dir to %s\n", buf); 420 "dir to %s\n", buf);
336
337 return 0;
338}
339
340static struct notifier_block mac80211_debugfs_netdev_notifier = {
341 .notifier_call = netdev_notify,
342};
343
344void ieee80211_debugfs_netdev_init(void)
345{
346 int err;
347
348 err = register_netdevice_notifier(&mac80211_debugfs_netdev_notifier);
349 if (err) {
350 printk(KERN_ERR
351 "mac80211: failed to install netdev notifier,"
352 " disabling per-netdev debugfs!\n");
353 } else
354 notif_registered = 1;
355}
356
357void ieee80211_debugfs_netdev_exit(void)
358{
359 unregister_netdevice_notifier(&mac80211_debugfs_netdev_notifier);
360 notif_registered = 0;
361} 421}
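
A rough sketch (not part of the patch) of the macro pattern the debugfs_netdev.c changes above move to: __IEEE80211_IF_FILE() now takes an optional write hook, and __IEEE80211_IF_FILE_W() stamps out a write wrapper that forwards user input to a per-attribute parser. The attr_file structure, IF_FILE macro, and smps parser below are illustrative stand-ins only.

#include <stdio.h>
#include <string.h>

struct attr_file {
        const char *name;
        int (*write)(const char *buf, int len);   /* NULL => read-only */
};

#define IF_FILE(name, _write) \
        static struct attr_file name##_file = { #name, (_write) }

static int parse_smps(const char *buf, int len)
{
        static const char *modes[] = { "auto", "off", "static", "dynamic" };

        for (unsigned i = 0; i < sizeof(modes) / sizeof(modes[0]); i++)
                if (strncmp(buf, modes[i], len) == 0)
                        return (int)i;
        return -1;
}

IF_FILE(drop_unencrypted, NULL);      /* read-only attribute */
IF_FILE(smps, parse_smps);            /* writable attribute */

int main(void)
{
        printf("%s writable: %d\n", drop_unencrypted_file.name,
               drop_unencrypted_file.write != NULL);
        printf("%s -> mode %d\n", smps_file.name,
               smps_file.write("static", 6));
        return 0;
}
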
diff --git a/net/mac80211/debugfs_netdev.h b/net/mac80211/debugfs_netdev.h
index 7af731f0b73..79025e79f4d 100644
--- a/net/mac80211/debugfs_netdev.h
+++ b/net/mac80211/debugfs_netdev.h
@@ -6,8 +6,7 @@
6#ifdef CONFIG_MAC80211_DEBUGFS 6#ifdef CONFIG_MAC80211_DEBUGFS
7void ieee80211_debugfs_add_netdev(struct ieee80211_sub_if_data *sdata); 7void ieee80211_debugfs_add_netdev(struct ieee80211_sub_if_data *sdata);
8void ieee80211_debugfs_remove_netdev(struct ieee80211_sub_if_data *sdata); 8void ieee80211_debugfs_remove_netdev(struct ieee80211_sub_if_data *sdata);
9void ieee80211_debugfs_netdev_init(void); 9void ieee80211_debugfs_rename_netdev(struct ieee80211_sub_if_data *sdata);
10void ieee80211_debugfs_netdev_exit(void);
11#else 10#else
12static inline void ieee80211_debugfs_add_netdev( 11static inline void ieee80211_debugfs_add_netdev(
13 struct ieee80211_sub_if_data *sdata) 12 struct ieee80211_sub_if_data *sdata)
@@ -15,10 +14,8 @@ static inline void ieee80211_debugfs_add_netdev(
15static inline void ieee80211_debugfs_remove_netdev( 14static inline void ieee80211_debugfs_remove_netdev(
16 struct ieee80211_sub_if_data *sdata) 15 struct ieee80211_sub_if_data *sdata)
17{} 16{}
18static inline void ieee80211_debugfs_netdev_init(void) 17static inline void ieee80211_debugfs_rename_netdev(
19{} 18 struct ieee80211_sub_if_data *sdata)
20
21static inline void ieee80211_debugfs_netdev_exit(void)
22{} 19{}
23#endif 20#endif
24 21
diff --git a/net/mac80211/debugfs_sta.c b/net/mac80211/debugfs_sta.c
index 3f41608c808..0d4a759ba72 100644
--- a/net/mac80211/debugfs_sta.c
+++ b/net/mac80211/debugfs_sta.c
@@ -44,7 +44,7 @@ static const struct file_operations sta_ ##name## _ops = { \
44 STA_OPS(name) 44 STA_OPS(name)
45 45
46STA_FILE(aid, sta.aid, D); 46STA_FILE(aid, sta.aid, D);
47STA_FILE(dev, sdata->dev->name, S); 47STA_FILE(dev, sdata->name, S);
48STA_FILE(rx_packets, rx_packets, LU); 48STA_FILE(rx_packets, rx_packets, LU);
49STA_FILE(tx_packets, tx_packets, LU); 49STA_FILE(tx_packets, tx_packets, LU);
50STA_FILE(rx_bytes, rx_bytes, LU); 50STA_FILE(rx_bytes, rx_bytes, LU);
@@ -160,7 +160,12 @@ STA_OPS(agg_status);
160static ssize_t sta_ht_capa_read(struct file *file, char __user *userbuf, 160static ssize_t sta_ht_capa_read(struct file *file, char __user *userbuf,
161 size_t count, loff_t *ppos) 161 size_t count, loff_t *ppos)
162{ 162{
163 char buf[200], *p = buf; 163#define PRINT_HT_CAP(_cond, _str) \
164 do { \
165 if (_cond) \
166 p += scnprintf(p, sizeof(buf)+buf-p, "\t" _str "\n"); \
167 } while (0)
168 char buf[1024], *p = buf;
164 int i; 169 int i;
165 struct sta_info *sta = file->private_data; 170 struct sta_info *sta = file->private_data;
166 struct ieee80211_sta_ht_cap *htc = &sta->sta.ht_cap; 171 struct ieee80211_sta_ht_cap *htc = &sta->sta.ht_cap;
@@ -168,15 +173,64 @@ static ssize_t sta_ht_capa_read(struct file *file, char __user *userbuf,
168 p += scnprintf(p, sizeof(buf) + buf - p, "ht %ssupported\n", 173 p += scnprintf(p, sizeof(buf) + buf - p, "ht %ssupported\n",
169 htc->ht_supported ? "" : "not "); 174 htc->ht_supported ? "" : "not ");
170 if (htc->ht_supported) { 175 if (htc->ht_supported) {
171 p += scnprintf(p, sizeof(buf)+buf-p, "cap: %#.2x\n", htc->cap); 176 p += scnprintf(p, sizeof(buf)+buf-p, "cap: %#.4x\n", htc->cap);
177
178 PRINT_HT_CAP((htc->cap & BIT(0)), "RX LDCP");
179 PRINT_HT_CAP((htc->cap & BIT(1)), "HT20/HT40");
180 PRINT_HT_CAP(!(htc->cap & BIT(1)), "HT20");
181
182 PRINT_HT_CAP(((htc->cap >> 2) & 0x3) == 0, "Static SM Power Save");
183 PRINT_HT_CAP(((htc->cap >> 2) & 0x3) == 1, "Dynamic SM Power Save");
184 PRINT_HT_CAP(((htc->cap >> 2) & 0x3) == 3, "SM Power Save disabled");
185
186 PRINT_HT_CAP((htc->cap & BIT(4)), "RX Greenfield");
187 PRINT_HT_CAP((htc->cap & BIT(5)), "RX HT20 SGI");
188 PRINT_HT_CAP((htc->cap & BIT(6)), "RX HT40 SGI");
189 PRINT_HT_CAP((htc->cap & BIT(7)), "TX STBC");
190
191 PRINT_HT_CAP(((htc->cap >> 8) & 0x3) == 0, "No RX STBC");
192 PRINT_HT_CAP(((htc->cap >> 8) & 0x3) == 1, "RX STBC 1-stream");
193 PRINT_HT_CAP(((htc->cap >> 8) & 0x3) == 2, "RX STBC 2-streams");
194 PRINT_HT_CAP(((htc->cap >> 8) & 0x3) == 3, "RX STBC 3-streams");
195
196 PRINT_HT_CAP((htc->cap & BIT(10)), "HT Delayed Block Ack");
197
198 PRINT_HT_CAP((htc->cap & BIT(11)), "Max AMSDU length: "
199 "3839 bytes");
200 PRINT_HT_CAP(!(htc->cap & BIT(11)), "Max AMSDU length: "
201 "7935 bytes");
202
203 /*
204 * For beacons and probe response this would mean the BSS
205 * does or does not allow the usage of DSSS/CCK HT40.
206 * Otherwise it means the STA does or does not use
207 * DSSS/CCK HT40.
208 */
209 PRINT_HT_CAP((htc->cap & BIT(12)), "DSSS/CCK HT40");
210 PRINT_HT_CAP(!(htc->cap & BIT(12)), "No DSSS/CCK HT40");
211
212 /* BIT(13) is reserved */
213
214 PRINT_HT_CAP((htc->cap & BIT(14)), "40 MHz Intolerant");
215
216 PRINT_HT_CAP((htc->cap & BIT(15)), "L-SIG TXOP protection");
217
172 p += scnprintf(p, sizeof(buf)+buf-p, "ampdu factor/density: %d/%d\n", 218 p += scnprintf(p, sizeof(buf)+buf-p, "ampdu factor/density: %d/%d\n",
173 htc->ampdu_factor, htc->ampdu_density); 219 htc->ampdu_factor, htc->ampdu_density);
174 p += scnprintf(p, sizeof(buf)+buf-p, "MCS mask:"); 220 p += scnprintf(p, sizeof(buf)+buf-p, "MCS mask:");
221
175 for (i = 0; i < IEEE80211_HT_MCS_MASK_LEN; i++) 222 for (i = 0; i < IEEE80211_HT_MCS_MASK_LEN; i++)
176 p += scnprintf(p, sizeof(buf)+buf-p, " %.2x", 223 p += scnprintf(p, sizeof(buf)+buf-p, " %.2x",
177 htc->mcs.rx_mask[i]); 224 htc->mcs.rx_mask[i]);
178 p += scnprintf(p, sizeof(buf)+buf-p, "\nMCS rx highest: %d\n", 225 p += scnprintf(p, sizeof(buf)+buf-p, "\n");
179 le16_to_cpu(htc->mcs.rx_highest)); 226
227 /* If not set this is meaningless */
228 if (le16_to_cpu(htc->mcs.rx_highest)) {
229 p += scnprintf(p, sizeof(buf)+buf-p,
230 "MCS rx highest: %d Mbps\n",
231 le16_to_cpu(htc->mcs.rx_highest));
232 }
233
180 p += scnprintf(p, sizeof(buf)+buf-p, "MCS tx params: %x\n", 234 p += scnprintf(p, sizeof(buf)+buf-p, "MCS tx params: %x\n",
181 htc->mcs.tx_params); 235 htc->mcs.tx_params);
182 } 236 }
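
A self-contained sketch (not part of the patch) of what the PRINT_HT_CAP hunk above does: decode individual bits of the 16-bit HT capability field into human-readable lines. Only a few bits are shown; the kernel code uses scnprintf so the pointer never runs past the buffer, which plain snprintf does not guarantee, but the buffer here is large enough for the sketch.

#include <stdio.h>

#define BIT(n) (1u << (n))

static int decode_ht_cap(unsigned int cap, char *buf, size_t buflen)
{
        char *p = buf;

        p += snprintf(p, buflen - (p - buf), "cap: %#.4x\n", cap);
        if (cap & BIT(1))
                p += snprintf(p, buflen - (p - buf), "\tHT20/HT40\n");
        else
                p += snprintf(p, buflen - (p - buf), "\tHT20\n");
        if (cap & BIT(5))
                p += snprintf(p, buflen - (p - buf), "\tRX HT20 SGI\n");
        if (cap & BIT(6))
                p += snprintf(p, buflen - (p - buf), "\tRX HT40 SGI\n");

        return p - buf;
}

int main(void)
{
        char buf[256];

        decode_ht_cap(BIT(1) | BIT(5), buf, sizeof(buf));
        fputs(buf, stdout);
        return 0;
}
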
diff --git a/net/mac80211/driver-ops.h b/net/mac80211/driver-ops.h
index 921dd9c9ff6..de91d39e027 100644
--- a/net/mac80211/driver-ops.h
+++ b/net/mac80211/driver-ops.h
@@ -14,6 +14,8 @@ static inline int drv_start(struct ieee80211_local *local)
14{ 14{
15 int ret; 15 int ret;
16 16
17 might_sleep();
18
17 local->started = true; 19 local->started = true;
18 smp_mb(); 20 smp_mb();
19 ret = local->ops->start(&local->hw); 21 ret = local->ops->start(&local->hw);
@@ -23,6 +25,8 @@ static inline int drv_start(struct ieee80211_local *local)
23 25
24static inline void drv_stop(struct ieee80211_local *local) 26static inline void drv_stop(struct ieee80211_local *local)
25{ 27{
28 might_sleep();
29
26 local->ops->stop(&local->hw); 30 local->ops->stop(&local->hw);
27 trace_drv_stop(local); 31 trace_drv_stop(local);
28 32
@@ -36,35 +40,47 @@ static inline void drv_stop(struct ieee80211_local *local)
36} 40}
37 41
38static inline int drv_add_interface(struct ieee80211_local *local, 42static inline int drv_add_interface(struct ieee80211_local *local,
39 struct ieee80211_if_init_conf *conf) 43 struct ieee80211_vif *vif)
40{ 44{
41 int ret = local->ops->add_interface(&local->hw, conf); 45 int ret;
42 trace_drv_add_interface(local, conf->mac_addr, conf->vif, ret); 46
47 might_sleep();
48
49 ret = local->ops->add_interface(&local->hw, vif);
50 trace_drv_add_interface(local, vif_to_sdata(vif), ret);
43 return ret; 51 return ret;
44} 52}
45 53
46static inline void drv_remove_interface(struct ieee80211_local *local, 54static inline void drv_remove_interface(struct ieee80211_local *local,
47 struct ieee80211_if_init_conf *conf) 55 struct ieee80211_vif *vif)
48{ 56{
49 local->ops->remove_interface(&local->hw, conf); 57 might_sleep();
50 trace_drv_remove_interface(local, conf->mac_addr, conf->vif); 58
59 local->ops->remove_interface(&local->hw, vif);
60 trace_drv_remove_interface(local, vif_to_sdata(vif));
51} 61}
52 62
53static inline int drv_config(struct ieee80211_local *local, u32 changed) 63static inline int drv_config(struct ieee80211_local *local, u32 changed)
54{ 64{
55 int ret = local->ops->config(&local->hw, changed); 65 int ret;
66
67 might_sleep();
68
69 ret = local->ops->config(&local->hw, changed);
56 trace_drv_config(local, changed, ret); 70 trace_drv_config(local, changed, ret);
57 return ret; 71 return ret;
58} 72}
59 73
60static inline void drv_bss_info_changed(struct ieee80211_local *local, 74static inline void drv_bss_info_changed(struct ieee80211_local *local,
61 struct ieee80211_vif *vif, 75 struct ieee80211_sub_if_data *sdata,
62 struct ieee80211_bss_conf *info, 76 struct ieee80211_bss_conf *info,
63 u32 changed) 77 u32 changed)
64{ 78{
79 might_sleep();
80
65 if (local->ops->bss_info_changed) 81 if (local->ops->bss_info_changed)
66 local->ops->bss_info_changed(&local->hw, vif, info, changed); 82 local->ops->bss_info_changed(&local->hw, &sdata->vif, info, changed);
67 trace_drv_bss_info_changed(local, vif, info, changed); 83 trace_drv_bss_info_changed(local, sdata, info, changed);
68} 84}
69 85
70static inline u64 drv_prepare_multicast(struct ieee80211_local *local, 86static inline u64 drv_prepare_multicast(struct ieee80211_local *local,
@@ -106,12 +122,17 @@ static inline int drv_set_tim(struct ieee80211_local *local,
106} 122}
107 123
108static inline int drv_set_key(struct ieee80211_local *local, 124static inline int drv_set_key(struct ieee80211_local *local,
109 enum set_key_cmd cmd, struct ieee80211_vif *vif, 125 enum set_key_cmd cmd,
126 struct ieee80211_sub_if_data *sdata,
110 struct ieee80211_sta *sta, 127 struct ieee80211_sta *sta,
111 struct ieee80211_key_conf *key) 128 struct ieee80211_key_conf *key)
112{ 129{
113 int ret = local->ops->set_key(&local->hw, cmd, vif, sta, key); 130 int ret;
114 trace_drv_set_key(local, cmd, vif, sta, key, ret); 131
132 might_sleep();
133
134 ret = local->ops->set_key(&local->hw, cmd, &sdata->vif, sta, key);
135 trace_drv_set_key(local, cmd, sdata, sta, key, ret);
115 return ret; 136 return ret;
116} 137}
117 138
@@ -120,6 +141,8 @@ static inline void drv_update_tkip_key(struct ieee80211_local *local,
120 const u8 *address, u32 iv32, 141 const u8 *address, u32 iv32,
121 u16 *phase1key) 142 u16 *phase1key)
122{ 143{
144 might_sleep();
145
123 if (local->ops->update_tkip_key) 146 if (local->ops->update_tkip_key)
124 local->ops->update_tkip_key(&local->hw, conf, address, 147 local->ops->update_tkip_key(&local->hw, conf, address,
125 iv32, phase1key); 148 iv32, phase1key);
@@ -129,13 +152,19 @@ static inline void drv_update_tkip_key(struct ieee80211_local *local,
129static inline int drv_hw_scan(struct ieee80211_local *local, 152static inline int drv_hw_scan(struct ieee80211_local *local,
130 struct cfg80211_scan_request *req) 153 struct cfg80211_scan_request *req)
131{ 154{
132 int ret = local->ops->hw_scan(&local->hw, req); 155 int ret;
156
157 might_sleep();
158
159 ret = local->ops->hw_scan(&local->hw, req);
133 trace_drv_hw_scan(local, req, ret); 160 trace_drv_hw_scan(local, req, ret);
134 return ret; 161 return ret;
135} 162}
136 163
137static inline void drv_sw_scan_start(struct ieee80211_local *local) 164static inline void drv_sw_scan_start(struct ieee80211_local *local)
138{ 165{
166 might_sleep();
167
139 if (local->ops->sw_scan_start) 168 if (local->ops->sw_scan_start)
140 local->ops->sw_scan_start(&local->hw); 169 local->ops->sw_scan_start(&local->hw);
141 trace_drv_sw_scan_start(local); 170 trace_drv_sw_scan_start(local);
@@ -143,6 +172,8 @@ static inline void drv_sw_scan_start(struct ieee80211_local *local)
143 172
144static inline void drv_sw_scan_complete(struct ieee80211_local *local) 173static inline void drv_sw_scan_complete(struct ieee80211_local *local)
145{ 174{
175 might_sleep();
176
146 if (local->ops->sw_scan_complete) 177 if (local->ops->sw_scan_complete)
147 local->ops->sw_scan_complete(&local->hw); 178 local->ops->sw_scan_complete(&local->hw);
148 trace_drv_sw_scan_complete(local); 179 trace_drv_sw_scan_complete(local);
@@ -153,6 +184,8 @@ static inline int drv_get_stats(struct ieee80211_local *local,
153{ 184{
154 int ret = -EOPNOTSUPP; 185 int ret = -EOPNOTSUPP;
155 186
187 might_sleep();
188
156 if (local->ops->get_stats) 189 if (local->ops->get_stats)
157 ret = local->ops->get_stats(&local->hw, stats); 190 ret = local->ops->get_stats(&local->hw, stats);
158 trace_drv_get_stats(local, stats, ret); 191 trace_drv_get_stats(local, stats, ret);
@@ -172,26 +205,47 @@ static inline int drv_set_rts_threshold(struct ieee80211_local *local,
172 u32 value) 205 u32 value)
173{ 206{
174 int ret = 0; 207 int ret = 0;
208
209 might_sleep();
210
175 if (local->ops->set_rts_threshold) 211 if (local->ops->set_rts_threshold)
176 ret = local->ops->set_rts_threshold(&local->hw, value); 212 ret = local->ops->set_rts_threshold(&local->hw, value);
177 trace_drv_set_rts_threshold(local, value, ret); 213 trace_drv_set_rts_threshold(local, value, ret);
178 return ret; 214 return ret;
179} 215}
180 216
217static inline int drv_set_coverage_class(struct ieee80211_local *local,
218 u8 value)
219{
220 int ret = 0;
221 might_sleep();
222
223 if (local->ops->set_coverage_class)
224 local->ops->set_coverage_class(&local->hw, value);
225 else
226 ret = -EOPNOTSUPP;
227
228 trace_drv_set_coverage_class(local, value, ret);
229 return ret;
230}
231
181static inline void drv_sta_notify(struct ieee80211_local *local, 232static inline void drv_sta_notify(struct ieee80211_local *local,
182 struct ieee80211_vif *vif, 233 struct ieee80211_sub_if_data *sdata,
183 enum sta_notify_cmd cmd, 234 enum sta_notify_cmd cmd,
184 struct ieee80211_sta *sta) 235 struct ieee80211_sta *sta)
185{ 236{
186 if (local->ops->sta_notify) 237 if (local->ops->sta_notify)
187 local->ops->sta_notify(&local->hw, vif, cmd, sta); 238 local->ops->sta_notify(&local->hw, &sdata->vif, cmd, sta);
188 trace_drv_sta_notify(local, vif, cmd, sta); 239 trace_drv_sta_notify(local, sdata, cmd, sta);
189} 240}
190 241
191static inline int drv_conf_tx(struct ieee80211_local *local, u16 queue, 242static inline int drv_conf_tx(struct ieee80211_local *local, u16 queue,
192 const struct ieee80211_tx_queue_params *params) 243 const struct ieee80211_tx_queue_params *params)
193{ 244{
194 int ret = -EOPNOTSUPP; 245 int ret = -EOPNOTSUPP;
246
247 might_sleep();
248
195 if (local->ops->conf_tx) 249 if (local->ops->conf_tx)
196 ret = local->ops->conf_tx(&local->hw, queue, params); 250 ret = local->ops->conf_tx(&local->hw, queue, params);
197 trace_drv_conf_tx(local, queue, params, ret); 251 trace_drv_conf_tx(local, queue, params, ret);
@@ -209,6 +263,9 @@ static inline int drv_get_tx_stats(struct ieee80211_local *local,
209static inline u64 drv_get_tsf(struct ieee80211_local *local) 263static inline u64 drv_get_tsf(struct ieee80211_local *local)
210{ 264{
211 u64 ret = -1ULL; 265 u64 ret = -1ULL;
266
267 might_sleep();
268
212 if (local->ops->get_tsf) 269 if (local->ops->get_tsf)
213 ret = local->ops->get_tsf(&local->hw); 270 ret = local->ops->get_tsf(&local->hw);
214 trace_drv_get_tsf(local, ret); 271 trace_drv_get_tsf(local, ret);
@@ -217,6 +274,8 @@ static inline u64 drv_get_tsf(struct ieee80211_local *local)
217 274
218static inline void drv_set_tsf(struct ieee80211_local *local, u64 tsf) 275static inline void drv_set_tsf(struct ieee80211_local *local, u64 tsf)
219{ 276{
277 might_sleep();
278
220 if (local->ops->set_tsf) 279 if (local->ops->set_tsf)
221 local->ops->set_tsf(&local->hw, tsf); 280 local->ops->set_tsf(&local->hw, tsf);
222 trace_drv_set_tsf(local, tsf); 281 trace_drv_set_tsf(local, tsf);
@@ -224,6 +283,8 @@ static inline void drv_set_tsf(struct ieee80211_local *local, u64 tsf)
224 283
225static inline void drv_reset_tsf(struct ieee80211_local *local) 284static inline void drv_reset_tsf(struct ieee80211_local *local)
226{ 285{
286 might_sleep();
287
227 if (local->ops->reset_tsf) 288 if (local->ops->reset_tsf)
228 local->ops->reset_tsf(&local->hw); 289 local->ops->reset_tsf(&local->hw);
229 trace_drv_reset_tsf(local); 290 trace_drv_reset_tsf(local);
@@ -232,6 +293,9 @@ static inline void drv_reset_tsf(struct ieee80211_local *local)
232static inline int drv_tx_last_beacon(struct ieee80211_local *local) 293static inline int drv_tx_last_beacon(struct ieee80211_local *local)
233{ 294{
234 int ret = 1; 295 int ret = 1;
296
297 might_sleep();
298
235 if (local->ops->tx_last_beacon) 299 if (local->ops->tx_last_beacon)
236 ret = local->ops->tx_last_beacon(&local->hw); 300 ret = local->ops->tx_last_beacon(&local->hw);
237 trace_drv_tx_last_beacon(local, ret); 301 trace_drv_tx_last_beacon(local, ret);
@@ -239,23 +303,34 @@ static inline int drv_tx_last_beacon(struct ieee80211_local *local)
239} 303}
240 304
241static inline int drv_ampdu_action(struct ieee80211_local *local, 305static inline int drv_ampdu_action(struct ieee80211_local *local,
242 struct ieee80211_vif *vif, 306 struct ieee80211_sub_if_data *sdata,
243 enum ieee80211_ampdu_mlme_action action, 307 enum ieee80211_ampdu_mlme_action action,
244 struct ieee80211_sta *sta, u16 tid, 308 struct ieee80211_sta *sta, u16 tid,
245 u16 *ssn) 309 u16 *ssn)
246{ 310{
247 int ret = -EOPNOTSUPP; 311 int ret = -EOPNOTSUPP;
248 if (local->ops->ampdu_action) 312 if (local->ops->ampdu_action)
249 ret = local->ops->ampdu_action(&local->hw, vif, action, 313 ret = local->ops->ampdu_action(&local->hw, &sdata->vif, action,
250 sta, tid, ssn); 314 sta, tid, ssn);
251 trace_drv_ampdu_action(local, vif, action, sta, tid, ssn, ret); 315 trace_drv_ampdu_action(local, sdata, action, sta, tid, ssn, ret);
252 return ret; 316 return ret;
253} 317}
254 318
255 319
256static inline void drv_rfkill_poll(struct ieee80211_local *local) 320static inline void drv_rfkill_poll(struct ieee80211_local *local)
257{ 321{
322 might_sleep();
323
258 if (local->ops->rfkill_poll) 324 if (local->ops->rfkill_poll)
259 local->ops->rfkill_poll(&local->hw); 325 local->ops->rfkill_poll(&local->hw);
260} 326}
327
328static inline void drv_flush(struct ieee80211_local *local, bool drop)
329{
330 might_sleep();
331
332 trace_drv_flush(local, drop);
333 if (local->ops->flush)
334 local->ops->flush(&local->hw, drop);
335}
261#endif /* __MAC80211_DRIVER_OPS */ 336#endif /* __MAC80211_DRIVER_OPS */
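
The driver-ops.h hunks above add might_sleep() to each wrapper so that calls made from atomic context are caught immediately rather than when a driver eventually sleeps. A standalone sketch of that wrapper shape follows (assert the context, call the optional callback, trace the result); it is not kernel code, and the names and the atomic-context stand-in are illustrative.

#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

static bool in_atomic_context;           /* stand-in for preempt/IRQ state */

static void might_sleep_check(void)      /* stand-in for might_sleep() */
{
        assert(!in_atomic_context);
}

struct driver_ops { int (*set_rts_threshold)(unsigned int value); };

static int drv_set_rts_threshold(const struct driver_ops *ops,
                                 unsigned int value)
{
        int ret = 0;

        might_sleep_check();
        if (ops->set_rts_threshold)          /* the callback is optional */
                ret = ops->set_rts_threshold(value);
        printf("trace: set_rts_threshold value=%u ret=%d\n", value, ret);
        return ret;
}

static int hw_set_rts(unsigned int value)
{
        return value > 2347 ? -22 /* -EINVAL */ : 0;
}

int main(void)
{
        struct driver_ops ops = { .set_rts_threshold = hw_set_rts };

        return drv_set_rts_threshold(&ops, 500);
}
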
diff --git a/net/mac80211/driver-trace.h b/net/mac80211/driver-trace.h
index da8497ef706..d6bd9f51740 100644
--- a/net/mac80211/driver-trace.h
+++ b/net/mac80211/driver-trace.h
@@ -25,10 +25,12 @@ static inline void trace_ ## name(proto) {}
25#define STA_PR_FMT " sta:%pM" 25#define STA_PR_FMT " sta:%pM"
26#define STA_PR_ARG __entry->sta_addr 26#define STA_PR_ARG __entry->sta_addr
27 27
28#define VIF_ENTRY __field(enum nl80211_iftype, vif_type) __field(void *, vif) 28#define VIF_ENTRY __field(enum nl80211_iftype, vif_type) __field(void *, sdata) \
29#define VIF_ASSIGN __entry->vif_type = vif ? vif->type : 0; __entry->vif = vif 29 __string(vif_name, sdata->dev ? sdata->dev->name : "<nodev>")
30#define VIF_PR_FMT " vif:%p(%d)" 30#define VIF_ASSIGN __entry->vif_type = sdata->vif.type; __entry->sdata = sdata; \
31#define VIF_PR_ARG __entry->vif, __entry->vif_type 31 __assign_str(vif_name, sdata->dev ? sdata->dev->name : "<nodev>")
32#define VIF_PR_FMT " vif:%s(%d)"
33#define VIF_PR_ARG __get_str(vif_name), __entry->vif_type
32 34
33TRACE_EVENT(drv_start, 35TRACE_EVENT(drv_start,
34 TP_PROTO(struct ieee80211_local *local, int ret), 36 TP_PROTO(struct ieee80211_local *local, int ret),
@@ -70,11 +72,10 @@ TRACE_EVENT(drv_stop,
70 72
71TRACE_EVENT(drv_add_interface, 73TRACE_EVENT(drv_add_interface,
72 TP_PROTO(struct ieee80211_local *local, 74 TP_PROTO(struct ieee80211_local *local,
73 const u8 *addr, 75 struct ieee80211_sub_if_data *sdata,
74 struct ieee80211_vif *vif,
75 int ret), 76 int ret),
76 77
77 TP_ARGS(local, addr, vif, ret), 78 TP_ARGS(local, sdata, ret),
78 79
79 TP_STRUCT__entry( 80 TP_STRUCT__entry(
80 LOCAL_ENTRY 81 LOCAL_ENTRY
@@ -86,7 +87,7 @@ TRACE_EVENT(drv_add_interface,
86 TP_fast_assign( 87 TP_fast_assign(
87 LOCAL_ASSIGN; 88 LOCAL_ASSIGN;
88 VIF_ASSIGN; 89 VIF_ASSIGN;
89 memcpy(__entry->addr, addr, 6); 90 memcpy(__entry->addr, sdata->vif.addr, 6);
90 __entry->ret = ret; 91 __entry->ret = ret;
91 ), 92 ),
92 93
@@ -97,10 +98,9 @@ TRACE_EVENT(drv_add_interface,
97); 98);
98 99
99TRACE_EVENT(drv_remove_interface, 100TRACE_EVENT(drv_remove_interface,
100 TP_PROTO(struct ieee80211_local *local, 101 TP_PROTO(struct ieee80211_local *local, struct ieee80211_sub_if_data *sdata),
101 const u8 *addr, struct ieee80211_vif *vif),
102 102
103 TP_ARGS(local, addr, vif), 103 TP_ARGS(local, sdata),
104 104
105 TP_STRUCT__entry( 105 TP_STRUCT__entry(
106 LOCAL_ENTRY 106 LOCAL_ENTRY
@@ -111,7 +111,7 @@ TRACE_EVENT(drv_remove_interface,
111 TP_fast_assign( 111 TP_fast_assign(
112 LOCAL_ASSIGN; 112 LOCAL_ASSIGN;
113 VIF_ASSIGN; 113 VIF_ASSIGN;
114 memcpy(__entry->addr, addr, 6); 114 memcpy(__entry->addr, sdata->vif.addr, 6);
115 ), 115 ),
116 116
117 TP_printk( 117 TP_printk(
@@ -140,6 +140,7 @@ TRACE_EVENT(drv_config,
140 __field(u8, short_frame_max_tx_count) 140 __field(u8, short_frame_max_tx_count)
141 __field(int, center_freq) 141 __field(int, center_freq)
142 __field(int, channel_type) 142 __field(int, channel_type)
143 __field(int, smps)
143 ), 144 ),
144 145
145 TP_fast_assign( 146 TP_fast_assign(
@@ -155,6 +156,7 @@ TRACE_EVENT(drv_config,
155 __entry->short_frame_max_tx_count = local->hw.conf.short_frame_max_tx_count; 156 __entry->short_frame_max_tx_count = local->hw.conf.short_frame_max_tx_count;
156 __entry->center_freq = local->hw.conf.channel->center_freq; 157 __entry->center_freq = local->hw.conf.channel->center_freq;
157 __entry->channel_type = local->hw.conf.channel_type; 158 __entry->channel_type = local->hw.conf.channel_type;
159 __entry->smps = local->hw.conf.smps_mode;
158 ), 160 ),
159 161
160 TP_printk( 162 TP_printk(
@@ -165,11 +167,11 @@ TRACE_EVENT(drv_config,
165 167
166TRACE_EVENT(drv_bss_info_changed, 168TRACE_EVENT(drv_bss_info_changed,
167 TP_PROTO(struct ieee80211_local *local, 169 TP_PROTO(struct ieee80211_local *local,
168 struct ieee80211_vif *vif, 170 struct ieee80211_sub_if_data *sdata,
169 struct ieee80211_bss_conf *info, 171 struct ieee80211_bss_conf *info,
170 u32 changed), 172 u32 changed),
171 173
172 TP_ARGS(local, vif, info, changed), 174 TP_ARGS(local, sdata, info, changed),
173 175
174 TP_STRUCT__entry( 176 TP_STRUCT__entry(
175 LOCAL_ENTRY 177 LOCAL_ENTRY
@@ -293,11 +295,11 @@ TRACE_EVENT(drv_set_tim,
293 295
294TRACE_EVENT(drv_set_key, 296TRACE_EVENT(drv_set_key,
295 TP_PROTO(struct ieee80211_local *local, 297 TP_PROTO(struct ieee80211_local *local,
296 enum set_key_cmd cmd, struct ieee80211_vif *vif, 298 enum set_key_cmd cmd, struct ieee80211_sub_if_data *sdata,
297 struct ieee80211_sta *sta, 299 struct ieee80211_sta *sta,
298 struct ieee80211_key_conf *key, int ret), 300 struct ieee80211_key_conf *key, int ret),
299 301
300 TP_ARGS(local, cmd, vif, sta, key, ret), 302 TP_ARGS(local, cmd, sdata, sta, key, ret),
301 303
302 TP_STRUCT__entry( 304 TP_STRUCT__entry(
303 LOCAL_ENTRY 305 LOCAL_ENTRY
@@ -489,13 +491,36 @@ TRACE_EVENT(drv_set_rts_threshold,
489 ) 491 )
490); 492);
491 493
494TRACE_EVENT(drv_set_coverage_class,
495 TP_PROTO(struct ieee80211_local *local, u8 value, int ret),
496
497 TP_ARGS(local, value, ret),
498
499 TP_STRUCT__entry(
500 LOCAL_ENTRY
501 __field(u8, value)
502 __field(int, ret)
503 ),
504
505 TP_fast_assign(
506 LOCAL_ASSIGN;
507 __entry->ret = ret;
508 __entry->value = value;
509 ),
510
511 TP_printk(
512 LOCAL_PR_FMT " value:%d ret:%d",
513 LOCAL_PR_ARG, __entry->value, __entry->ret
514 )
515);
516
492TRACE_EVENT(drv_sta_notify, 517TRACE_EVENT(drv_sta_notify,
493 TP_PROTO(struct ieee80211_local *local, 518 TP_PROTO(struct ieee80211_local *local,
494 struct ieee80211_vif *vif, 519 struct ieee80211_sub_if_data *sdata,
495 enum sta_notify_cmd cmd, 520 enum sta_notify_cmd cmd,
496 struct ieee80211_sta *sta), 521 struct ieee80211_sta *sta),
497 522
498 TP_ARGS(local, vif, cmd, sta), 523 TP_ARGS(local, sdata, cmd, sta),
499 524
500 TP_STRUCT__entry( 525 TP_STRUCT__entry(
501 LOCAL_ENTRY 526 LOCAL_ENTRY
@@ -656,12 +681,12 @@ TRACE_EVENT(drv_tx_last_beacon,
656 681
657TRACE_EVENT(drv_ampdu_action, 682TRACE_EVENT(drv_ampdu_action,
658 TP_PROTO(struct ieee80211_local *local, 683 TP_PROTO(struct ieee80211_local *local,
659 struct ieee80211_vif *vif, 684 struct ieee80211_sub_if_data *sdata,
660 enum ieee80211_ampdu_mlme_action action, 685 enum ieee80211_ampdu_mlme_action action,
661 struct ieee80211_sta *sta, u16 tid, 686 struct ieee80211_sta *sta, u16 tid,
662 u16 *ssn, int ret), 687 u16 *ssn, int ret),
663 688
664 TP_ARGS(local, vif, action, sta, tid, ssn, ret), 689 TP_ARGS(local, sdata, action, sta, tid, ssn, ret),
665 690
666 TP_STRUCT__entry( 691 TP_STRUCT__entry(
667 LOCAL_ENTRY 692 LOCAL_ENTRY
@@ -688,6 +713,27 @@ TRACE_EVENT(drv_ampdu_action,
688 LOCAL_PR_ARG, VIF_PR_ARG, STA_PR_ARG, __entry->action, __entry->tid, __entry->ret 713 LOCAL_PR_ARG, VIF_PR_ARG, STA_PR_ARG, __entry->action, __entry->tid, __entry->ret
689 ) 714 )
690); 715);
716
717TRACE_EVENT(drv_flush,
718 TP_PROTO(struct ieee80211_local *local, bool drop),
719
720 TP_ARGS(local, drop),
721
722 TP_STRUCT__entry(
723 LOCAL_ENTRY
724 __field(bool, drop)
725 ),
726
727 TP_fast_assign(
728 LOCAL_ASSIGN;
729 __entry->drop = drop;
730 ),
731
732 TP_printk(
733 LOCAL_PR_FMT " drop:%d",
734 LOCAL_PR_ARG, __entry->drop
735 )
736);
691#endif /* !__MAC80211_DRIVER_TRACE || TRACE_HEADER_MULTI_READ */ 737#endif /* !__MAC80211_DRIVER_TRACE || TRACE_HEADER_MULTI_READ */
692 738
693#undef TRACE_INCLUDE_PATH 739#undef TRACE_INCLUDE_PATH
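
A sketch (not part of the patch) of the effect of the VIF_ENTRY/VIF_PR_FMT change above: each trace entry records the interface name as a string captured at trace time, rather than a raw vif pointer, so the output stays meaningful even after the interface is gone. The plain struct below merely models that capture; it is not the tracepoint machinery.

#include <stdio.h>

struct trace_entry {
        int vif_type;
        char vif_name[16];     /* copied at trace time, like __string() */
};

struct sub_if_data { const char *name; int vif_type; };

static void trace_drv_event(struct trace_entry *e,
                            const struct sub_if_data *sdata)
{
        e->vif_type = sdata->vif_type;
        snprintf(e->vif_name, sizeof(e->vif_name), "%s",
                 sdata->name ? sdata->name : "<nodev>");
}

int main(void)
{
        struct trace_entry e;
        struct sub_if_data sdata = { .name = "wlan0", .vif_type = 2 };

        trace_drv_event(&e, &sdata);
        printf("vif:%s(%d)\n", e.vif_name, e.vif_type);
        return 0;
}
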
diff --git a/net/mac80211/ht.c b/net/mac80211/ht.c
index d7dcee68072..bb677a73b7c 100644
--- a/net/mac80211/ht.c
+++ b/net/mac80211/ht.c
@@ -125,7 +125,7 @@ void ieee80211_send_delba(struct ieee80211_sub_if_data *sdata,
125 125
126 if (!skb) { 126 if (!skb) {
127 printk(KERN_ERR "%s: failed to allocate buffer " 127 printk(KERN_ERR "%s: failed to allocate buffer "
128 "for delba frame\n", sdata->dev->name); 128 "for delba frame\n", sdata->name);
129 return; 129 return;
130 } 130 }
131 131
@@ -133,10 +133,10 @@ void ieee80211_send_delba(struct ieee80211_sub_if_data *sdata,
133 mgmt = (struct ieee80211_mgmt *) skb_put(skb, 24); 133 mgmt = (struct ieee80211_mgmt *) skb_put(skb, 24);
134 memset(mgmt, 0, 24); 134 memset(mgmt, 0, 24);
135 memcpy(mgmt->da, da, ETH_ALEN); 135 memcpy(mgmt->da, da, ETH_ALEN);
136 memcpy(mgmt->sa, sdata->dev->dev_addr, ETH_ALEN); 136 memcpy(mgmt->sa, sdata->vif.addr, ETH_ALEN);
137 if (sdata->vif.type == NL80211_IFTYPE_AP || 137 if (sdata->vif.type == NL80211_IFTYPE_AP ||
138 sdata->vif.type == NL80211_IFTYPE_AP_VLAN) 138 sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
139 memcpy(mgmt->bssid, sdata->dev->dev_addr, ETH_ALEN); 139 memcpy(mgmt->bssid, sdata->vif.addr, ETH_ALEN);
140 else if (sdata->vif.type == NL80211_IFTYPE_STATION) 140 else if (sdata->vif.type == NL80211_IFTYPE_STATION)
141 memcpy(mgmt->bssid, sdata->u.mgd.bssid, ETH_ALEN); 141 memcpy(mgmt->bssid, sdata->u.mgd.bssid, ETH_ALEN);
142 142
@@ -185,3 +185,50 @@ void ieee80211_process_delba(struct ieee80211_sub_if_data *sdata,
185 spin_unlock_bh(&sta->lock); 185 spin_unlock_bh(&sta->lock);
186 } 186 }
187} 187}
188
189int ieee80211_send_smps_action(struct ieee80211_sub_if_data *sdata,
190 enum ieee80211_smps_mode smps, const u8 *da,
191 const u8 *bssid)
192{
193 struct ieee80211_local *local = sdata->local;
194 struct sk_buff *skb;
195 struct ieee80211_mgmt *action_frame;
196
197 /* 27 = header + category + action + smps mode */
198 skb = dev_alloc_skb(27 + local->hw.extra_tx_headroom);
199 if (!skb)
200 return -ENOMEM;
201
202 skb_reserve(skb, local->hw.extra_tx_headroom);
203 action_frame = (void *)skb_put(skb, 27);
204 memcpy(action_frame->da, da, ETH_ALEN);
205 memcpy(action_frame->sa, sdata->dev->dev_addr, ETH_ALEN);
206 memcpy(action_frame->bssid, bssid, ETH_ALEN);
207 action_frame->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
208 IEEE80211_STYPE_ACTION);
209 action_frame->u.action.category = WLAN_CATEGORY_HT;
210 action_frame->u.action.u.ht_smps.action = WLAN_HT_ACTION_SMPS;
211 switch (smps) {
212 case IEEE80211_SMPS_AUTOMATIC:
213 case IEEE80211_SMPS_NUM_MODES:
214 WARN_ON(1);
215 case IEEE80211_SMPS_OFF:
216 action_frame->u.action.u.ht_smps.smps_control =
217 WLAN_HT_SMPS_CONTROL_DISABLED;
218 break;
219 case IEEE80211_SMPS_STATIC:
220 action_frame->u.action.u.ht_smps.smps_control =
221 WLAN_HT_SMPS_CONTROL_STATIC;
222 break;
223 case IEEE80211_SMPS_DYNAMIC:
224 action_frame->u.action.u.ht_smps.smps_control =
225 WLAN_HT_SMPS_CONTROL_DYNAMIC;
226 break;
227 }
228
229 /* we'll do more on status of this frame */
230 IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_CTL_REQ_TX_STATUS;
231 ieee80211_tx_skb(sdata, skb);
232
233 return 0;
234}
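ieee80211_send_smps_action(), added above, builds a 27-byte HT action frame announcing the requested SM power save mode and queues it with TX-status reporting enabled. A minimal managed-mode caller could look like the sketch below; the wrapper name is illustrative and not part of the patch.

static int example_request_static_smps(struct ieee80211_sub_if_data *sdata)
{
	struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;

	/* In managed mode both the destination and the BSSID of the
	 * SM power save action frame are the AP's address. */
	return ieee80211_send_smps_action(sdata, IEEE80211_SMPS_STATIC,
					  ifmgd->bssid, ifmgd->bssid);
}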
diff --git a/net/mac80211/ibss.c b/net/mac80211/ibss.c
index 1f2db647bb5..5bcde4c3fba 100644
--- a/net/mac80211/ibss.c
+++ b/net/mac80211/ibss.c
@@ -117,7 +117,7 @@ static void __ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata,
117 mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT | 117 mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
118 IEEE80211_STYPE_PROBE_RESP); 118 IEEE80211_STYPE_PROBE_RESP);
119 memset(mgmt->da, 0xff, ETH_ALEN); 119 memset(mgmt->da, 0xff, ETH_ALEN);
120 memcpy(mgmt->sa, sdata->dev->dev_addr, ETH_ALEN); 120 memcpy(mgmt->sa, sdata->vif.addr, ETH_ALEN);
121 memcpy(mgmt->bssid, ifibss->bssid, ETH_ALEN); 121 memcpy(mgmt->bssid, ifibss->bssid, ETH_ALEN);
122 mgmt->u.beacon.beacon_int = cpu_to_le16(beacon_int); 122 mgmt->u.beacon.beacon_int = cpu_to_le16(beacon_int);
123 mgmt->u.beacon.timestamp = cpu_to_le64(tsf); 123 mgmt->u.beacon.timestamp = cpu_to_le64(tsf);
@@ -187,15 +187,17 @@ static void __ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata,
187static void ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata, 187static void ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata,
188 struct ieee80211_bss *bss) 188 struct ieee80211_bss *bss)
189{ 189{
190 struct cfg80211_bss *cbss =
191 container_of((void *)bss, struct cfg80211_bss, priv);
190 struct ieee80211_supported_band *sband; 192 struct ieee80211_supported_band *sband;
191 u32 basic_rates; 193 u32 basic_rates;
192 int i, j; 194 int i, j;
193 u16 beacon_int = bss->cbss.beacon_interval; 195 u16 beacon_int = cbss->beacon_interval;
194 196
195 if (beacon_int < 10) 197 if (beacon_int < 10)
196 beacon_int = 10; 198 beacon_int = 10;
197 199
198 sband = sdata->local->hw.wiphy->bands[bss->cbss.channel->band]; 200 sband = sdata->local->hw.wiphy->bands[cbss->channel->band];
199 201
200 basic_rates = 0; 202 basic_rates = 0;
201 203
@@ -212,12 +214,12 @@ static void ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata,
212 } 214 }
213 } 215 }
214 216
215 __ieee80211_sta_join_ibss(sdata, bss->cbss.bssid, 217 __ieee80211_sta_join_ibss(sdata, cbss->bssid,
216 beacon_int, 218 beacon_int,
217 bss->cbss.channel, 219 cbss->channel,
218 basic_rates, 220 basic_rates,
219 bss->cbss.capability, 221 cbss->capability,
220 bss->cbss.tsf); 222 cbss->tsf);
221} 223}
222 224
223static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata, 225static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata,
@@ -229,6 +231,7 @@ static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata,
229{ 231{
230 struct ieee80211_local *local = sdata->local; 232 struct ieee80211_local *local = sdata->local;
231 int freq; 233 int freq;
234 struct cfg80211_bss *cbss;
232 struct ieee80211_bss *bss; 235 struct ieee80211_bss *bss;
233 struct sta_info *sta; 236 struct sta_info *sta;
234 struct ieee80211_channel *channel; 237 struct ieee80211_channel *channel;
@@ -252,7 +255,7 @@ static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata,
252 255
253 rcu_read_lock(); 256 rcu_read_lock();
254 257
255 sta = sta_info_get(local, mgmt->sa); 258 sta = sta_info_get(sdata, mgmt->sa);
256 if (sta) { 259 if (sta) {
257 u32 prev_rates; 260 u32 prev_rates;
258 261
@@ -266,7 +269,7 @@ static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata,
266 printk(KERN_DEBUG "%s: updated supp_rates set " 269 printk(KERN_DEBUG "%s: updated supp_rates set "
267 "for %pM based on beacon info (0x%llx | " 270 "for %pM based on beacon info (0x%llx | "
268 "0x%llx -> 0x%llx)\n", 271 "0x%llx -> 0x%llx)\n",
269 sdata->dev->name, 272 sdata->name,
270 sta->sta.addr, 273 sta->sta.addr,
271 (unsigned long long) prev_rates, 274 (unsigned long long) prev_rates,
272 (unsigned long long) supp_rates, 275 (unsigned long long) supp_rates,
@@ -283,8 +286,10 @@ static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata,
283 if (!bss) 286 if (!bss)
284 return; 287 return;
285 288
289 cbss = container_of((void *)bss, struct cfg80211_bss, priv);
290
286 /* was just updated in ieee80211_bss_info_update */ 291 /* was just updated in ieee80211_bss_info_update */
287 beacon_timestamp = bss->cbss.tsf; 292 beacon_timestamp = cbss->tsf;
288 293
289 /* check if we need to merge IBSS */ 294 /* check if we need to merge IBSS */
290 295
@@ -297,11 +302,11 @@ static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata,
297 goto put_bss; 302 goto put_bss;
298 303
299 /* not an IBSS */ 304 /* not an IBSS */
300 if (!(bss->cbss.capability & WLAN_CAPABILITY_IBSS)) 305 if (!(cbss->capability & WLAN_CAPABILITY_IBSS))
301 goto put_bss; 306 goto put_bss;
302 307
303 /* different channel */ 308 /* different channel */
304 if (bss->cbss.channel != local->oper_channel) 309 if (cbss->channel != local->oper_channel)
305 goto put_bss; 310 goto put_bss;
306 311
307 /* different SSID */ 312 /* different SSID */
@@ -311,7 +316,7 @@ static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata,
311 goto put_bss; 316 goto put_bss;
312 317
313 /* same BSSID */ 318 /* same BSSID */
314 if (memcmp(bss->cbss.bssid, sdata->u.ibss.bssid, ETH_ALEN) == 0) 319 if (memcmp(cbss->bssid, sdata->u.ibss.bssid, ETH_ALEN) == 0)
315 goto put_bss; 320 goto put_bss;
316 321
317 if (rx_status->flag & RX_FLAG_TSFT) { 322 if (rx_status->flag & RX_FLAG_TSFT) {
@@ -364,7 +369,7 @@ static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata,
364#ifdef CONFIG_MAC80211_IBSS_DEBUG 369#ifdef CONFIG_MAC80211_IBSS_DEBUG
365 printk(KERN_DEBUG "%s: beacon TSF higher than " 370 printk(KERN_DEBUG "%s: beacon TSF higher than "
366 "local TSF - IBSS merge with BSSID %pM\n", 371 "local TSF - IBSS merge with BSSID %pM\n",
367 sdata->dev->name, mgmt->bssid); 372 sdata->name, mgmt->bssid);
368#endif 373#endif
369 ieee80211_sta_join_ibss(sdata, bss); 374 ieee80211_sta_join_ibss(sdata, bss);
370 ieee80211_ibss_add_sta(sdata, mgmt->bssid, mgmt->sa, supp_rates); 375 ieee80211_ibss_add_sta(sdata, mgmt->bssid, mgmt->sa, supp_rates);
@@ -394,7 +399,7 @@ struct sta_info *ieee80211_ibss_add_sta(struct ieee80211_sub_if_data *sdata,
394 if (local->num_sta >= IEEE80211_IBSS_MAX_STA_ENTRIES) { 399 if (local->num_sta >= IEEE80211_IBSS_MAX_STA_ENTRIES) {
395 if (net_ratelimit()) 400 if (net_ratelimit())
396 printk(KERN_DEBUG "%s: No room for a new IBSS STA entry %pM\n", 401 printk(KERN_DEBUG "%s: No room for a new IBSS STA entry %pM\n",
397 sdata->dev->name, addr); 402 sdata->name, addr);
398 return NULL; 403 return NULL;
399 } 404 }
400 405
@@ -406,7 +411,7 @@ struct sta_info *ieee80211_ibss_add_sta(struct ieee80211_sub_if_data *sdata,
406 411
407#ifdef CONFIG_MAC80211_VERBOSE_DEBUG 412#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
408 printk(KERN_DEBUG "%s: Adding new IBSS station %pM (dev=%s)\n", 413 printk(KERN_DEBUG "%s: Adding new IBSS station %pM (dev=%s)\n",
409 wiphy_name(local->hw.wiphy), addr, sdata->dev->name); 414 wiphy_name(local->hw.wiphy), addr, sdata->name);
410#endif 415#endif
411 416
412 sta = sta_info_alloc(sdata, addr, GFP_ATOMIC); 417 sta = sta_info_alloc(sdata, addr, GFP_ATOMIC);
@@ -470,7 +475,7 @@ static void ieee80211_sta_merge_ibss(struct ieee80211_sub_if_data *sdata)
470 return; 475 return;
471 476
472 printk(KERN_DEBUG "%s: No active IBSS STAs - trying to scan for other " 477 printk(KERN_DEBUG "%s: No active IBSS STAs - trying to scan for other "
473 "IBSS networks with same SSID (merge)\n", sdata->dev->name); 478 "IBSS networks with same SSID (merge)\n", sdata->name);
474 479
475 ieee80211_request_internal_scan(sdata, ifibss->ssid, ifibss->ssid_len); 480 ieee80211_request_internal_scan(sdata, ifibss->ssid, ifibss->ssid_len);
476} 481}
@@ -492,13 +497,13 @@ static void ieee80211_sta_create_ibss(struct ieee80211_sub_if_data *sdata)
492 * random number generator get different BSSID. */ 497 * random number generator get different BSSID. */
493 get_random_bytes(bssid, ETH_ALEN); 498 get_random_bytes(bssid, ETH_ALEN);
494 for (i = 0; i < ETH_ALEN; i++) 499 for (i = 0; i < ETH_ALEN; i++)
495 bssid[i] ^= sdata->dev->dev_addr[i]; 500 bssid[i] ^= sdata->vif.addr[i];
496 bssid[0] &= ~0x01; 501 bssid[0] &= ~0x01;
497 bssid[0] |= 0x02; 502 bssid[0] |= 0x02;
498 } 503 }
499 504
500 printk(KERN_DEBUG "%s: Creating new IBSS network, BSSID %pM\n", 505 printk(KERN_DEBUG "%s: Creating new IBSS network, BSSID %pM\n",
501 sdata->dev->name, bssid); 506 sdata->name, bssid);
502 507
503 sband = local->hw.wiphy->bands[ifibss->channel->band]; 508 sband = local->hw.wiphy->bands[ifibss->channel->band];
504 509
@@ -518,7 +523,7 @@ static void ieee80211_sta_find_ibss(struct ieee80211_sub_if_data *sdata)
518{ 523{
519 struct ieee80211_if_ibss *ifibss = &sdata->u.ibss; 524 struct ieee80211_if_ibss *ifibss = &sdata->u.ibss;
520 struct ieee80211_local *local = sdata->local; 525 struct ieee80211_local *local = sdata->local;
521 struct ieee80211_bss *bss; 526 struct cfg80211_bss *cbss;
522 struct ieee80211_channel *chan = NULL; 527 struct ieee80211_channel *chan = NULL;
523 const u8 *bssid = NULL; 528 const u8 *bssid = NULL;
524 int active_ibss; 529 int active_ibss;
@@ -527,7 +532,7 @@ static void ieee80211_sta_find_ibss(struct ieee80211_sub_if_data *sdata)
527 active_ibss = ieee80211_sta_active_ibss(sdata); 532 active_ibss = ieee80211_sta_active_ibss(sdata);
528#ifdef CONFIG_MAC80211_IBSS_DEBUG 533#ifdef CONFIG_MAC80211_IBSS_DEBUG
529 printk(KERN_DEBUG "%s: sta_find_ibss (active_ibss=%d)\n", 534 printk(KERN_DEBUG "%s: sta_find_ibss (active_ibss=%d)\n",
530 sdata->dev->name, active_ibss); 535 sdata->name, active_ibss);
531#endif /* CONFIG_MAC80211_IBSS_DEBUG */ 536#endif /* CONFIG_MAC80211_IBSS_DEBUG */
532 537
533 if (active_ibss) 538 if (active_ibss)
@@ -542,21 +547,23 @@ static void ieee80211_sta_find_ibss(struct ieee80211_sub_if_data *sdata)
542 chan = ifibss->channel; 547 chan = ifibss->channel;
543 if (!is_zero_ether_addr(ifibss->bssid)) 548 if (!is_zero_ether_addr(ifibss->bssid))
544 bssid = ifibss->bssid; 549 bssid = ifibss->bssid;
545 bss = (void *)cfg80211_get_bss(local->hw.wiphy, chan, bssid, 550 cbss = cfg80211_get_bss(local->hw.wiphy, chan, bssid,
546 ifibss->ssid, ifibss->ssid_len, 551 ifibss->ssid, ifibss->ssid_len,
547 WLAN_CAPABILITY_IBSS | 552 WLAN_CAPABILITY_IBSS | WLAN_CAPABILITY_PRIVACY,
548 WLAN_CAPABILITY_PRIVACY, 553 capability);
549 capability); 554
555 if (cbss) {
556 struct ieee80211_bss *bss;
550 557
551 if (bss) { 558 bss = (void *)cbss->priv;
552#ifdef CONFIG_MAC80211_IBSS_DEBUG 559#ifdef CONFIG_MAC80211_IBSS_DEBUG
553 printk(KERN_DEBUG " sta_find_ibss: selected %pM current " 560 printk(KERN_DEBUG " sta_find_ibss: selected %pM current "
554 "%pM\n", bss->cbss.bssid, ifibss->bssid); 561 "%pM\n", cbss->bssid, ifibss->bssid);
555#endif /* CONFIG_MAC80211_IBSS_DEBUG */ 562#endif /* CONFIG_MAC80211_IBSS_DEBUG */
556 563
557 printk(KERN_DEBUG "%s: Selected IBSS BSSID %pM" 564 printk(KERN_DEBUG "%s: Selected IBSS BSSID %pM"
558 " based on configured SSID\n", 565 " based on configured SSID\n",
559 sdata->dev->name, bss->cbss.bssid); 566 sdata->name, cbss->bssid);
560 567
561 ieee80211_sta_join_ibss(sdata, bss); 568 ieee80211_sta_join_ibss(sdata, bss);
562 ieee80211_rx_bss_put(local, bss); 569 ieee80211_rx_bss_put(local, bss);
@@ -575,7 +582,7 @@ static void ieee80211_sta_find_ibss(struct ieee80211_sub_if_data *sdata)
575 } else if (time_after(jiffies, ifibss->last_scan_completed + 582 } else if (time_after(jiffies, ifibss->last_scan_completed +
576 IEEE80211_SCAN_INTERVAL)) { 583 IEEE80211_SCAN_INTERVAL)) {
577 printk(KERN_DEBUG "%s: Trigger new scan to find an IBSS to " 584 printk(KERN_DEBUG "%s: Trigger new scan to find an IBSS to "
578 "join\n", sdata->dev->name); 585 "join\n", sdata->name);
579 586
580 ieee80211_request_internal_scan(sdata, ifibss->ssid, 587 ieee80211_request_internal_scan(sdata, ifibss->ssid,
581 ifibss->ssid_len); 588 ifibss->ssid_len);
@@ -589,7 +596,7 @@ static void ieee80211_sta_find_ibss(struct ieee80211_sub_if_data *sdata)
589 return; 596 return;
590 } 597 }
591 printk(KERN_DEBUG "%s: IBSS not allowed on" 598 printk(KERN_DEBUG "%s: IBSS not allowed on"
592 " %d MHz\n", sdata->dev->name, 599 " %d MHz\n", sdata->name,
593 local->hw.conf.channel->center_freq); 600 local->hw.conf.channel->center_freq);
594 601
595 /* No IBSS found - decrease scan interval and continue 602 /* No IBSS found - decrease scan interval and continue
@@ -623,7 +630,7 @@ static void ieee80211_rx_mgmt_probe_req(struct ieee80211_sub_if_data *sdata,
623#ifdef CONFIG_MAC80211_IBSS_DEBUG 630#ifdef CONFIG_MAC80211_IBSS_DEBUG
624 printk(KERN_DEBUG "%s: RX ProbeReq SA=%pM DA=%pM BSSID=%pM" 631 printk(KERN_DEBUG "%s: RX ProbeReq SA=%pM DA=%pM BSSID=%pM"
625 " (tx_last_beacon=%d)\n", 632 " (tx_last_beacon=%d)\n",
626 sdata->dev->name, mgmt->sa, mgmt->da, 633 sdata->name, mgmt->sa, mgmt->da,
627 mgmt->bssid, tx_last_beacon); 634 mgmt->bssid, tx_last_beacon);
628#endif /* CONFIG_MAC80211_IBSS_DEBUG */ 635#endif /* CONFIG_MAC80211_IBSS_DEBUG */
629 636
@@ -641,7 +648,7 @@ static void ieee80211_rx_mgmt_probe_req(struct ieee80211_sub_if_data *sdata,
641#ifdef CONFIG_MAC80211_IBSS_DEBUG 648#ifdef CONFIG_MAC80211_IBSS_DEBUG
642 printk(KERN_DEBUG "%s: Invalid SSID IE in ProbeReq " 649 printk(KERN_DEBUG "%s: Invalid SSID IE in ProbeReq "
643 "from %pM\n", 650 "from %pM\n",
644 sdata->dev->name, mgmt->sa); 651 sdata->name, mgmt->sa);
645#endif 652#endif
646 return; 653 return;
647 } 654 }
@@ -661,7 +668,7 @@ static void ieee80211_rx_mgmt_probe_req(struct ieee80211_sub_if_data *sdata,
661 memcpy(resp->da, mgmt->sa, ETH_ALEN); 668 memcpy(resp->da, mgmt->sa, ETH_ALEN);
662#ifdef CONFIG_MAC80211_IBSS_DEBUG 669#ifdef CONFIG_MAC80211_IBSS_DEBUG
663 printk(KERN_DEBUG "%s: Sending ProbeResp to %pM\n", 670 printk(KERN_DEBUG "%s: Sending ProbeResp to %pM\n",
664 sdata->dev->name, resp->da); 671 sdata->name, resp->da);
665#endif /* CONFIG_MAC80211_IBSS_DEBUG */ 672#endif /* CONFIG_MAC80211_IBSS_DEBUG */
666 IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT; 673 IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT;
667 ieee80211_tx_skb(sdata, skb); 674 ieee80211_tx_skb(sdata, skb);
@@ -675,7 +682,7 @@ static void ieee80211_rx_mgmt_probe_resp(struct ieee80211_sub_if_data *sdata,
675 size_t baselen; 682 size_t baselen;
676 struct ieee802_11_elems elems; 683 struct ieee802_11_elems elems;
677 684
678 if (memcmp(mgmt->da, sdata->dev->dev_addr, ETH_ALEN)) 685 if (memcmp(mgmt->da, sdata->vif.addr, ETH_ALEN))
679 return; /* ignore ProbeResp to foreign address */ 686 return; /* ignore ProbeResp to foreign address */
680 687
681 baselen = (u8 *) mgmt->u.probe_resp.variable - (u8 *) mgmt; 688 baselen = (u8 *) mgmt->u.probe_resp.variable - (u8 *) mgmt;
@@ -748,7 +755,7 @@ static void ieee80211_ibss_work(struct work_struct *work)
748 if (WARN_ON(local->suspended)) 755 if (WARN_ON(local->suspended))
749 return; 756 return;
750 757
751 if (!netif_running(sdata->dev)) 758 if (!ieee80211_sdata_running(sdata))
752 return; 759 return;
753 760
754 if (local->scanning) 761 if (local->scanning)
@@ -831,7 +838,7 @@ void ieee80211_ibss_notify_scan_completed(struct ieee80211_local *local)
831 838
832 mutex_lock(&local->iflist_mtx); 839 mutex_lock(&local->iflist_mtx);
833 list_for_each_entry(sdata, &local->interfaces, list) { 840 list_for_each_entry(sdata, &local->interfaces, list) {
834 if (!netif_running(sdata->dev)) 841 if (!ieee80211_sdata_running(sdata))
835 continue; 842 continue;
 836 if (sdata->vif.type != NL80211_IFTYPE_ADHOC) 843 if (sdata->vif.type != NL80211_IFTYPE_ADHOC)
 837 continue; 844 continue;
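With the cbss/priv split used throughout this file, struct ieee80211_bss no longer embeds struct cfg80211_bss; mac80211 now reaches the cfg80211 object through container_of() on the priv member. A small helper equivalent to the open-coded expression used above (the helper itself is not in the patch):

static inline struct cfg80211_bss *
example_bss_to_cbss(struct ieee80211_bss *bss)
{
	/* ieee80211_bss is allocated by cfg80211 as the priv area of
	 * cfg80211_bss (see the bss_priv_size change in main.c below),
	 * so walking back with container_of() is safe. */
	return container_of((void *)bss, struct cfg80211_bss, priv);
}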
diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
index 91dc8636d64..c18f576f184 100644
--- a/net/mac80211/ieee80211_i.h
+++ b/net/mac80211/ieee80211_i.h
@@ -58,6 +58,15 @@ struct ieee80211_local;
58 58
59#define TU_TO_EXP_TIME(x) (jiffies + usecs_to_jiffies((x) * 1024)) 59#define TU_TO_EXP_TIME(x) (jiffies + usecs_to_jiffies((x) * 1024))
60 60
61#define IEEE80211_DEFAULT_UAPSD_QUEUES \
62 (IEEE80211_WMM_IE_STA_QOSINFO_AC_BK | \
63 IEEE80211_WMM_IE_STA_QOSINFO_AC_BE | \
64 IEEE80211_WMM_IE_STA_QOSINFO_AC_VI | \
65 IEEE80211_WMM_IE_STA_QOSINFO_AC_VO)
66
67#define IEEE80211_DEFAULT_MAX_SP_LEN \
68 IEEE80211_WMM_IE_STA_QOSINFO_SP_ALL
69
61struct ieee80211_fragment_entry { 70struct ieee80211_fragment_entry {
62 unsigned long first_frag_time; 71 unsigned long first_frag_time;
63 unsigned int seq; 72 unsigned int seq;
@@ -71,9 +80,6 @@ struct ieee80211_fragment_entry {
71 80
72 81
73struct ieee80211_bss { 82struct ieee80211_bss {
74 /* Yes, this is a hack */
75 struct cfg80211_bss cbss;
76
77 /* don't want to look up all the time */ 83 /* don't want to look up all the time */
78 size_t ssid_len; 84 size_t ssid_len;
79 u8 ssid[IEEE80211_MAX_SSID_LEN]; 85 u8 ssid[IEEE80211_MAX_SSID_LEN];
@@ -81,6 +87,7 @@ struct ieee80211_bss {
81 u8 dtim_period; 87 u8 dtim_period;
82 88
83 bool wmm_used; 89 bool wmm_used;
90 bool uapsd_supported;
84 91
85 unsigned long last_probe_resp; 92 unsigned long last_probe_resp;
86 93
@@ -140,7 +147,6 @@ typedef unsigned __bitwise__ ieee80211_tx_result;
140 147
141struct ieee80211_tx_data { 148struct ieee80211_tx_data {
142 struct sk_buff *skb; 149 struct sk_buff *skb;
143 struct net_device *dev;
144 struct ieee80211_local *local; 150 struct ieee80211_local *local;
145 struct ieee80211_sub_if_data *sdata; 151 struct ieee80211_sub_if_data *sdata;
146 struct sta_info *sta; 152 struct sta_info *sta;
@@ -228,31 +234,78 @@ struct mesh_preq_queue {
228 u8 flags; 234 u8 flags;
229}; 235};
230 236
231enum ieee80211_mgd_state { 237enum ieee80211_work_type {
232 IEEE80211_MGD_STATE_IDLE, 238 IEEE80211_WORK_ABORT,
233 IEEE80211_MGD_STATE_PROBE, 239 IEEE80211_WORK_DIRECT_PROBE,
234 IEEE80211_MGD_STATE_AUTH, 240 IEEE80211_WORK_AUTH,
235 IEEE80211_MGD_STATE_ASSOC, 241 IEEE80211_WORK_ASSOC,
242 IEEE80211_WORK_REMAIN_ON_CHANNEL,
243};
244
245/**
246 * enum work_done_result - indicates what to do after work was done
247 *
248 * @WORK_DONE_DESTROY: This work item is no longer needed, destroy.
249 * @WORK_DONE_REQUEUE: This work item was reset to be reused, and
250 * should be requeued.
251 */
252enum work_done_result {
253 WORK_DONE_DESTROY,
254 WORK_DONE_REQUEUE,
236}; 255};
237 256
238struct ieee80211_mgd_work { 257struct ieee80211_work {
239 struct list_head list; 258 struct list_head list;
240 struct ieee80211_bss *bss; 259
241 int ie_len; 260 struct rcu_head rcu_head;
242 u8 prev_bssid[ETH_ALEN]; 261
243 u8 ssid[IEEE80211_MAX_SSID_LEN]; 262 struct ieee80211_sub_if_data *sdata;
244 u8 ssid_len; 263
264 enum work_done_result (*done)(struct ieee80211_work *wk,
265 struct sk_buff *skb);
266
267 struct ieee80211_channel *chan;
268 enum nl80211_channel_type chan_type;
269
245 unsigned long timeout; 270 unsigned long timeout;
246 enum ieee80211_mgd_state state; 271 enum ieee80211_work_type type;
247 u16 auth_alg, auth_transaction;
248 272
249 int tries; 273 u8 filter_ta[ETH_ALEN];
250 274
251 u8 key[WLAN_KEY_LEN_WEP104]; 275 bool started;
252 u8 key_len, key_idx; 276
277 union {
278 struct {
279 int tries;
280 u16 algorithm, transaction;
281 u8 ssid[IEEE80211_MAX_SSID_LEN];
282 u8 ssid_len;
283 u8 key[WLAN_KEY_LEN_WEP104];
284 u8 key_len, key_idx;
285 bool privacy;
286 } probe_auth;
287 struct {
288 struct cfg80211_bss *bss;
289 const u8 *supp_rates;
290 const u8 *ht_information_ie;
291 enum ieee80211_smps_mode smps;
292 int tries;
293 u16 capability;
294 u8 prev_bssid[ETH_ALEN];
295 u8 ssid[IEEE80211_MAX_SSID_LEN];
296 u8 ssid_len;
297 u8 supp_rates_len;
298 bool wmm_used, use_11n, uapsd_used;
299 } assoc;
300 struct {
301 u32 duration;
302 bool started;
303 } remain;
304 };
253 305
306 int ie_len;
254 /* must be last */ 307 /* must be last */
255 u8 ie[0]; /* for auth or assoc frame, not probe */ 308 u8 ie[0];
256}; 309};
257 310
258/* flags used in struct ieee80211_if_managed.flags */ 311/* flags used in struct ieee80211_if_managed.flags */
@@ -260,15 +313,10 @@ enum ieee80211_sta_flags {
260 IEEE80211_STA_BEACON_POLL = BIT(0), 313 IEEE80211_STA_BEACON_POLL = BIT(0),
261 IEEE80211_STA_CONNECTION_POLL = BIT(1), 314 IEEE80211_STA_CONNECTION_POLL = BIT(1),
262 IEEE80211_STA_CONTROL_PORT = BIT(2), 315 IEEE80211_STA_CONTROL_PORT = BIT(2),
263 IEEE80211_STA_WMM_ENABLED = BIT(3),
264 IEEE80211_STA_DISABLE_11N = BIT(4), 316 IEEE80211_STA_DISABLE_11N = BIT(4),
265 IEEE80211_STA_CSA_RECEIVED = BIT(5), 317 IEEE80211_STA_CSA_RECEIVED = BIT(5),
266 IEEE80211_STA_MFP_ENABLED = BIT(6), 318 IEEE80211_STA_MFP_ENABLED = BIT(6),
267}; 319 IEEE80211_STA_UAPSD_ENABLED = BIT(7),
268
269/* flags for MLME request */
270enum ieee80211_sta_request {
271 IEEE80211_STA_REQ_SCAN,
272}; 320};
273 321
274struct ieee80211_if_managed { 322struct ieee80211_if_managed {
@@ -285,21 +333,18 @@ struct ieee80211_if_managed {
285 int probe_send_count; 333 int probe_send_count;
286 334
287 struct mutex mtx; 335 struct mutex mtx;
288 struct ieee80211_bss *associated; 336 struct cfg80211_bss *associated;
289 struct ieee80211_mgd_work *old_associate_work;
290 struct list_head work_list;
291 337
292 u8 bssid[ETH_ALEN]; 338 u8 bssid[ETH_ALEN];
293 339
294 u16 aid; 340 u16 aid;
295 u16 capab;
296 341
297 struct sk_buff_head skb_queue; 342 struct sk_buff_head skb_queue;
298 343
299 unsigned long timers_running; /* used for quiesce/restart */ 344 unsigned long timers_running; /* used for quiesce/restart */
300 bool powersave; /* powersave requested for this iface */ 345 bool powersave; /* powersave requested for this iface */
301 346 enum ieee80211_smps_mode req_smps, /* requested smps mode */
302 unsigned long request; 347 ap_smps; /* smps mode AP thinks we're in */
303 348
304 unsigned int flags; 349 unsigned int flags;
305 350
@@ -433,6 +478,8 @@ struct ieee80211_sub_if_data {
433 478
434 int drop_unencrypted; 479 int drop_unencrypted;
435 480
481 char name[IFNAMSIZ];
482
436 /* 483 /*
437 * keep track of whether the HT opmode (stored in 484 * keep track of whether the HT opmode (stored in
438 * vif.bss_info.ht_operation_mode) is valid. 485 * vif.bss_info.ht_operation_mode) is valid.
@@ -458,8 +505,8 @@ struct ieee80211_sub_if_data {
458 */ 505 */
459 struct ieee80211_if_ap *bss; 506 struct ieee80211_if_ap *bss;
460 507
461 int force_unicast_rateidx; /* forced TX rateidx for unicast frames */ 508 /* bitmap of allowed (non-MCS) rate indexes for rate control */
462 int max_ratectrl_rateidx; /* max TX rateidx for rate control */ 509 u32 rc_rateidx_mask[IEEE80211_NUM_BANDS];
463 510
464 union { 511 union {
465 struct ieee80211_if_ap ap; 512 struct ieee80211_if_ap ap;
@@ -565,6 +612,15 @@ struct ieee80211_local {
565 const struct ieee80211_ops *ops; 612 const struct ieee80211_ops *ops;
566 613
567 /* 614 /*
615 * work stuff, potentially off-channel (in the future)
616 */
617 struct mutex work_mtx;
618 struct list_head work_list;
619 struct timer_list work_timer;
620 struct work_struct work_work;
621 struct sk_buff_head work_skb_queue;
622
623 /*
568 * private workqueue to mac80211. mac80211 makes this accessible 624 * private workqueue to mac80211. mac80211 makes this accessible
569 * via ieee80211_queue_work() 625 * via ieee80211_queue_work()
570 */ 626 */
@@ -586,6 +642,9 @@ struct ieee80211_local {
586 /* used for uploading changed mc list */ 642 /* used for uploading changed mc list */
587 struct work_struct reconfig_filter; 643 struct work_struct reconfig_filter;
588 644
645 /* used to reconfigure hardware SM PS */
646 struct work_struct recalc_smps;
647
589 /* aggregated multicast list */ 648 /* aggregated multicast list */
590 struct dev_addr_list *mc_list; 649 struct dev_addr_list *mc_list;
591 int mc_count; 650 int mc_count;
@@ -689,6 +748,10 @@ struct ieee80211_local {
689 enum nl80211_channel_type oper_channel_type; 748 enum nl80211_channel_type oper_channel_type;
690 struct ieee80211_channel *oper_channel, *csa_channel; 749 struct ieee80211_channel *oper_channel, *csa_channel;
691 750
751 /* Temporary remain-on-channel for off-channel operations */
752 struct ieee80211_channel *tmp_channel;
753 enum nl80211_channel_type tmp_channel_type;
754
692 /* SNMP counters */ 755 /* SNMP counters */
693 /* dot11CountersTable */ 756 /* dot11CountersTable */
694 u32 dot11TransmittedFragmentCount; 757 u32 dot11TransmittedFragmentCount;
@@ -745,8 +808,22 @@ struct ieee80211_local {
745 int wifi_wme_noack_test; 808 int wifi_wme_noack_test;
746 unsigned int wmm_acm; /* bit field of ACM bits (BIT(802.1D tag)) */ 809 unsigned int wmm_acm; /* bit field of ACM bits (BIT(802.1D tag)) */
747 810
811 /*
812 * Bitmask of enabled u-apsd queues,
813 * IEEE80211_WMM_IE_STA_QOSINFO_AC_BE & co. Needs a new association
814 * to take effect.
815 */
816 unsigned int uapsd_queues;
817
818 /*
819 * Maximum number of buffered frames AP can deliver during a
820 * service period, IEEE80211_WMM_IE_STA_QOSINFO_SP_ALL or similar.
821 * Needs a new association to take effect.
822 */
823 unsigned int uapsd_max_sp_len;
824
748 bool pspolling; 825 bool pspolling;
749 bool scan_ps_enabled; 826 bool offchannel_ps_enabled;
750 /* 827 /*
751 * PS can only be enabled when we have exactly one managed 828 * PS can only be enabled when we have exactly one managed
752 * interface (and monitors) in PS, this then points there. 829 * interface (and monitors) in PS, this then points there.
@@ -760,6 +837,8 @@ struct ieee80211_local {
760 int user_power_level; /* in dBm */ 837 int user_power_level; /* in dBm */
761 int power_constr_level; /* in dBm */ 838 int power_constr_level; /* in dBm */
762 839
840 enum ieee80211_smps_mode smps_mode;
841
763 struct work_struct restart_work; 842 struct work_struct restart_work;
764 843
765#ifdef CONFIG_MAC80211_DEBUGFS 844#ifdef CONFIG_MAC80211_DEBUGFS
@@ -874,6 +953,8 @@ void ieee80211_bss_info_change_notify(struct ieee80211_sub_if_data *sdata,
874void ieee80211_configure_filter(struct ieee80211_local *local); 953void ieee80211_configure_filter(struct ieee80211_local *local);
875u32 ieee80211_reset_erp_info(struct ieee80211_sub_if_data *sdata); 954u32 ieee80211_reset_erp_info(struct ieee80211_sub_if_data *sdata);
876 955
956extern bool ieee80211_disable_40mhz_24ghz;
957
877/* STA code */ 958/* STA code */
878void ieee80211_sta_setup_sdata(struct ieee80211_sub_if_data *sdata); 959void ieee80211_sta_setup_sdata(struct ieee80211_sub_if_data *sdata);
879int ieee80211_mgd_auth(struct ieee80211_sub_if_data *sdata, 960int ieee80211_mgd_auth(struct ieee80211_sub_if_data *sdata,
@@ -937,7 +1018,15 @@ ieee80211_rx_bss_get(struct ieee80211_local *local, u8 *bssid, int freq,
937void ieee80211_rx_bss_put(struct ieee80211_local *local, 1018void ieee80211_rx_bss_put(struct ieee80211_local *local,
938 struct ieee80211_bss *bss); 1019 struct ieee80211_bss *bss);
939 1020
1021/* off-channel helpers */
1022void ieee80211_offchannel_stop_beaconing(struct ieee80211_local *local);
1023void ieee80211_offchannel_stop_station(struct ieee80211_local *local);
1024void ieee80211_offchannel_return(struct ieee80211_local *local,
1025 bool enable_beaconing);
1026
940/* interface handling */ 1027/* interface handling */
1028int ieee80211_iface_init(void);
1029void ieee80211_iface_exit(void);
941int ieee80211_if_add(struct ieee80211_local *local, const char *name, 1030int ieee80211_if_add(struct ieee80211_local *local, const char *name,
942 struct net_device **new_dev, enum nl80211_iftype type, 1031 struct net_device **new_dev, enum nl80211_iftype type,
943 struct vif_params *params); 1032 struct vif_params *params);
@@ -948,6 +1037,11 @@ void ieee80211_remove_interfaces(struct ieee80211_local *local);
948u32 __ieee80211_recalc_idle(struct ieee80211_local *local); 1037u32 __ieee80211_recalc_idle(struct ieee80211_local *local);
949void ieee80211_recalc_idle(struct ieee80211_local *local); 1038void ieee80211_recalc_idle(struct ieee80211_local *local);
950 1039
1040static inline bool ieee80211_sdata_running(struct ieee80211_sub_if_data *sdata)
1041{
1042 return netif_running(sdata->dev);
1043}
1044
951/* tx handling */ 1045/* tx handling */
952void ieee80211_clear_tx_pending(struct ieee80211_local *local); 1046void ieee80211_clear_tx_pending(struct ieee80211_local *local);
953void ieee80211_tx_pending(unsigned long data); 1047void ieee80211_tx_pending(unsigned long data);
@@ -976,6 +1070,9 @@ void ieee80211_send_bar(struct ieee80211_sub_if_data *sdata, u8 *ra, u16 tid, u1
976void ieee80211_send_delba(struct ieee80211_sub_if_data *sdata, 1070void ieee80211_send_delba(struct ieee80211_sub_if_data *sdata,
977 const u8 *da, u16 tid, 1071 const u8 *da, u16 tid,
978 u16 initiator, u16 reason_code); 1072 u16 initiator, u16 reason_code);
1073int ieee80211_send_smps_action(struct ieee80211_sub_if_data *sdata,
1074 enum ieee80211_smps_mode smps, const u8 *da,
1075 const u8 *bssid);
979 1076
980void ieee80211_sta_stop_rx_ba_session(struct ieee80211_sub_if_data *sdata, u8 *da, 1077void ieee80211_sta_stop_rx_ba_session(struct ieee80211_sub_if_data *sdata, u8 *da,
981 u16 tid, u16 initiator, u16 reason); 1078 u16 tid, u16 initiator, u16 reason);
@@ -1086,6 +1183,28 @@ void ieee80211_sta_def_wmm_params(struct ieee80211_sub_if_data *sdata,
1086u32 ieee80211_sta_get_rates(struct ieee80211_local *local, 1183u32 ieee80211_sta_get_rates(struct ieee80211_local *local,
1087 struct ieee802_11_elems *elems, 1184 struct ieee802_11_elems *elems,
1088 enum ieee80211_band band); 1185 enum ieee80211_band band);
1186int __ieee80211_request_smps(struct ieee80211_sub_if_data *sdata,
1187 enum ieee80211_smps_mode smps_mode);
1188void ieee80211_recalc_smps(struct ieee80211_local *local,
1189 struct ieee80211_sub_if_data *forsdata);
1190
1191size_t ieee80211_ie_split(const u8 *ies, size_t ielen,
1192 const u8 *ids, int n_ids, size_t offset);
1193size_t ieee80211_ie_split_vendor(const u8 *ies, size_t ielen, size_t offset);
1194
1195/* internal work items */
1196void ieee80211_work_init(struct ieee80211_local *local);
1197void ieee80211_add_work(struct ieee80211_work *wk);
1198void free_work(struct ieee80211_work *wk);
1199void ieee80211_work_purge(struct ieee80211_sub_if_data *sdata);
1200ieee80211_rx_result ieee80211_work_rx_mgmt(struct ieee80211_sub_if_data *sdata,
1201 struct sk_buff *skb);
1202int ieee80211_wk_remain_on_channel(struct ieee80211_sub_if_data *sdata,
1203 struct ieee80211_channel *chan,
1204 enum nl80211_channel_type channel_type,
1205 unsigned int duration, u64 *cookie);
1206int ieee80211_wk_cancel_remain_on_channel(
1207 struct ieee80211_sub_if_data *sdata, u64 cookie);
1089 1208
1090#ifdef CONFIG_MAC80211_NOINLINE 1209#ifdef CONFIG_MAC80211_NOINLINE
1091#define debug_noinline noinline 1210#define debug_noinline noinline
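The ieee80211_work infrastructure declared above replaces the MLME-private work list and also carries temporary remain-on-channel requests. A hedged sketch of a caller using the two remain-on-channel entry points; the function name, variable names, and the duration value are illustrative only.

static int example_remain_on_channel(struct ieee80211_sub_if_data *sdata,
				     struct ieee80211_channel *chan)
{
	u64 cookie;
	int ret;

	/* Queue a remain-on-channel work item; the work code switches
	 * to tmp_channel for the requested dwell time. */
	ret = ieee80211_wk_remain_on_channel(sdata, chan, NL80211_CHAN_NO_HT,
					     100, &cookie);
	if (ret)
		return ret;

	/* ... off-channel activity happens from the work item ... */

	return ieee80211_wk_cancel_remain_on_channel(sdata, cookie);
}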
diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
index 32abae3ce32..edf21cebeee 100644
--- a/net/mac80211/iface.c
+++ b/net/mac80211/iface.c
@@ -62,6 +62,23 @@ static int ieee80211_change_mtu(struct net_device *dev, int new_mtu)
62 return 0; 62 return 0;
63} 63}
64 64
65static int ieee80211_change_mac(struct net_device *dev, void *addr)
66{
67 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
68 struct sockaddr *sa = addr;
69 int ret;
70
71 if (ieee80211_sdata_running(sdata))
72 return -EBUSY;
73
74 ret = eth_mac_addr(dev, sa);
75
76 if (ret == 0)
77 memcpy(sdata->vif.addr, sa->sa_data, ETH_ALEN);
78
79 return ret;
80}
81
65static inline int identical_mac_addr_allowed(int type1, int type2) 82static inline int identical_mac_addr_allowed(int type1, int type2)
66{ 83{
67 return type1 == NL80211_IFTYPE_MONITOR || 84 return type1 == NL80211_IFTYPE_MONITOR ||
@@ -82,7 +99,6 @@ static int ieee80211_open(struct net_device *dev)
82 struct ieee80211_sub_if_data *nsdata; 99 struct ieee80211_sub_if_data *nsdata;
83 struct ieee80211_local *local = sdata->local; 100 struct ieee80211_local *local = sdata->local;
84 struct sta_info *sta; 101 struct sta_info *sta;
85 struct ieee80211_if_init_conf conf;
86 u32 changed = 0; 102 u32 changed = 0;
87 int res; 103 int res;
88 u32 hw_reconf_flags = 0; 104 u32 hw_reconf_flags = 0;
@@ -97,7 +113,7 @@ static int ieee80211_open(struct net_device *dev)
97 list_for_each_entry(nsdata, &local->interfaces, list) { 113 list_for_each_entry(nsdata, &local->interfaces, list) {
98 struct net_device *ndev = nsdata->dev; 114 struct net_device *ndev = nsdata->dev;
99 115
100 if (ndev != dev && netif_running(ndev)) { 116 if (ndev != dev && ieee80211_sdata_running(nsdata)) {
101 /* 117 /*
102 * Allow only a single IBSS interface to be up at any 118 * Allow only a single IBSS interface to be up at any
103 * time. This is restricted because beacon distribution 119 * time. This is restricted because beacon distribution
@@ -183,7 +199,7 @@ static int ieee80211_open(struct net_device *dev)
183 struct net_device *ndev = nsdata->dev; 199 struct net_device *ndev = nsdata->dev;
184 200
185 /* 201 /*
186 * No need to check netif_running since we do not allow 202 * No need to check running since we do not allow
187 * it to start up with this invalid address. 203 * it to start up with this invalid address.
188 */ 204 */
189 if (compare_ether_addr(null_addr, ndev->dev_addr) == 0) { 205 if (compare_ether_addr(null_addr, ndev->dev_addr) == 0) {
@@ -234,10 +250,7 @@ static int ieee80211_open(struct net_device *dev)
234 ieee80211_configure_filter(local); 250 ieee80211_configure_filter(local);
235 break; 251 break;
236 default: 252 default:
237 conf.vif = &sdata->vif; 253 res = drv_add_interface(local, &sdata->vif);
238 conf.type = sdata->vif.type;
239 conf.mac_addr = dev->dev_addr;
240 res = drv_add_interface(local, &conf);
241 if (res) 254 if (res)
242 goto err_stop; 255 goto err_stop;
243 256
@@ -320,7 +333,7 @@ static int ieee80211_open(struct net_device *dev)
320 333
321 return 0; 334 return 0;
322 err_del_interface: 335 err_del_interface:
323 drv_remove_interface(local, &conf); 336 drv_remove_interface(local, &sdata->vif);
324 err_stop: 337 err_stop:
325 if (!local->open_count) 338 if (!local->open_count)
326 drv_stop(local); 339 drv_stop(local);
@@ -335,7 +348,6 @@ static int ieee80211_stop(struct net_device *dev)
335{ 348{
336 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); 349 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
337 struct ieee80211_local *local = sdata->local; 350 struct ieee80211_local *local = sdata->local;
338 struct ieee80211_if_init_conf conf;
339 struct sta_info *sta; 351 struct sta_info *sta;
340 unsigned long flags; 352 unsigned long flags;
341 struct sk_buff *skb, *tmp; 353 struct sk_buff *skb, *tmp;
@@ -348,6 +360,11 @@ static int ieee80211_stop(struct net_device *dev)
348 netif_tx_stop_all_queues(dev); 360 netif_tx_stop_all_queues(dev);
349 361
350 /* 362 /*
363 * Purge work for this interface.
364 */
365 ieee80211_work_purge(sdata);
366
367 /*
351 * Now delete all active aggregation sessions. 368 * Now delete all active aggregation sessions.
352 */ 369 */
353 rcu_read_lock(); 370 rcu_read_lock();
@@ -514,12 +531,9 @@ static int ieee80211_stop(struct net_device *dev)
514 BSS_CHANGED_BEACON_ENABLED); 531 BSS_CHANGED_BEACON_ENABLED);
515 } 532 }
516 533
517 conf.vif = &sdata->vif;
518 conf.type = sdata->vif.type;
519 conf.mac_addr = dev->dev_addr;
520 /* disable all keys for as long as this netdev is down */ 534 /* disable all keys for as long as this netdev is down */
521 ieee80211_disable_keys(sdata); 535 ieee80211_disable_keys(sdata);
522 drv_remove_interface(local, &conf); 536 drv_remove_interface(local, &sdata->vif);
523 } 537 }
524 538
525 sdata->bss = NULL; 539 sdata->bss = NULL;
@@ -659,7 +673,7 @@ static const struct net_device_ops ieee80211_dataif_ops = {
659 .ndo_start_xmit = ieee80211_subif_start_xmit, 673 .ndo_start_xmit = ieee80211_subif_start_xmit,
660 .ndo_set_multicast_list = ieee80211_set_multicast_list, 674 .ndo_set_multicast_list = ieee80211_set_multicast_list,
661 .ndo_change_mtu = ieee80211_change_mtu, 675 .ndo_change_mtu = ieee80211_change_mtu,
662 .ndo_set_mac_address = eth_mac_addr, 676 .ndo_set_mac_address = ieee80211_change_mac,
663 .ndo_select_queue = ieee80211_netdev_select_queue, 677 .ndo_select_queue = ieee80211_netdev_select_queue,
664}; 678};
665 679
@@ -779,7 +793,7 @@ int ieee80211_if_change_type(struct ieee80211_sub_if_data *sdata,
779 * and goes into the requested mode. 793 * and goes into the requested mode.
780 */ 794 */
781 795
782 if (netif_running(sdata->dev)) 796 if (ieee80211_sdata_running(sdata))
783 return -EBUSY; 797 return -EBUSY;
784 798
785 /* Purge and reset type-dependent state. */ 799 /* Purge and reset type-dependent state. */
@@ -833,6 +847,8 @@ int ieee80211_if_add(struct ieee80211_local *local, const char *name,
833 /* don't use IEEE80211_DEV_TO_SUB_IF because it checks too much */ 847 /* don't use IEEE80211_DEV_TO_SUB_IF because it checks too much */
834 sdata = netdev_priv(ndev); 848 sdata = netdev_priv(ndev);
835 ndev->ieee80211_ptr = &sdata->wdev; 849 ndev->ieee80211_ptr = &sdata->wdev;
850 memcpy(sdata->vif.addr, ndev->dev_addr, ETH_ALEN);
851 memcpy(sdata->name, ndev->name, IFNAMSIZ);
836 852
837 /* initialise type-independent data */ 853 /* initialise type-independent data */
838 sdata->wdev.wiphy = local->hw.wiphy; 854 sdata->wdev.wiphy = local->hw.wiphy;
@@ -844,8 +860,12 @@ int ieee80211_if_add(struct ieee80211_local *local, const char *name,
844 860
845 INIT_LIST_HEAD(&sdata->key_list); 861 INIT_LIST_HEAD(&sdata->key_list);
846 862
847 sdata->force_unicast_rateidx = -1; 863 for (i = 0; i < IEEE80211_NUM_BANDS; i++) {
848 sdata->max_ratectrl_rateidx = -1; 864 struct ieee80211_supported_band *sband;
865 sband = local->hw.wiphy->bands[i];
866 sdata->rc_rateidx_mask[i] =
867 sband ? (1 << sband->n_bitrates) - 1 : 0;
868 }
849 869
850 /* setup type-dependent data */ 870 /* setup type-dependent data */
851 ieee80211_setup_sdata(sdata, type); 871 ieee80211_setup_sdata(sdata, type);
@@ -938,6 +958,8 @@ static u32 ieee80211_idle_on(struct ieee80211_local *local)
938 wiphy_name(local->hw.wiphy)); 958 wiphy_name(local->hw.wiphy));
939#endif 959#endif
940 960
961 drv_flush(local, false);
962
941 local->hw.conf.flags |= IEEE80211_CONF_IDLE; 963 local->hw.conf.flags |= IEEE80211_CONF_IDLE;
942 return IEEE80211_CONF_CHANGE_IDLE; 964 return IEEE80211_CONF_CHANGE_IDLE;
943} 965}
@@ -947,16 +969,18 @@ u32 __ieee80211_recalc_idle(struct ieee80211_local *local)
947 struct ieee80211_sub_if_data *sdata; 969 struct ieee80211_sub_if_data *sdata;
948 int count = 0; 970 int count = 0;
949 971
972 if (!list_empty(&local->work_list))
973 return ieee80211_idle_off(local, "working");
974
950 if (local->scanning) 975 if (local->scanning)
951 return ieee80211_idle_off(local, "scanning"); 976 return ieee80211_idle_off(local, "scanning");
952 977
953 list_for_each_entry(sdata, &local->interfaces, list) { 978 list_for_each_entry(sdata, &local->interfaces, list) {
954 if (!netif_running(sdata->dev)) 979 if (!ieee80211_sdata_running(sdata))
955 continue; 980 continue;
956 /* do not count disabled managed interfaces */ 981 /* do not count disabled managed interfaces */
957 if (sdata->vif.type == NL80211_IFTYPE_STATION && 982 if (sdata->vif.type == NL80211_IFTYPE_STATION &&
958 !sdata->u.mgd.associated && 983 !sdata->u.mgd.associated)
959 list_empty(&sdata->u.mgd.work_list))
960 continue; 984 continue;
961 /* do not count unused IBSS interfaces */ 985 /* do not count unused IBSS interfaces */
962 if (sdata->vif.type == NL80211_IFTYPE_ADHOC && 986 if (sdata->vif.type == NL80211_IFTYPE_ADHOC &&
@@ -984,3 +1008,41 @@ void ieee80211_recalc_idle(struct ieee80211_local *local)
984 if (chg) 1008 if (chg)
985 ieee80211_hw_config(local, chg); 1009 ieee80211_hw_config(local, chg);
986} 1010}
1011
1012static int netdev_notify(struct notifier_block *nb,
1013 unsigned long state,
1014 void *ndev)
1015{
1016 struct net_device *dev = ndev;
1017 struct ieee80211_sub_if_data *sdata;
1018
1019 if (state != NETDEV_CHANGENAME)
1020 return 0;
1021
1022 if (!dev->ieee80211_ptr || !dev->ieee80211_ptr->wiphy)
1023 return 0;
1024
1025 if (dev->ieee80211_ptr->wiphy->privid != mac80211_wiphy_privid)
1026 return 0;
1027
1028 sdata = IEEE80211_DEV_TO_SUB_IF(dev);
1029
 1030 memcpy(sdata->name, dev->name, IFNAMSIZ);
1031
1032 ieee80211_debugfs_rename_netdev(sdata);
1033 return 0;
1034}
1035
1036static struct notifier_block mac80211_netdev_notifier = {
1037 .notifier_call = netdev_notify,
1038};
1039
1040int ieee80211_iface_init(void)
1041{
1042 return register_netdevice_notifier(&mac80211_netdev_notifier);
1043}
1044
1045void ieee80211_iface_exit(void)
1046{
1047 unregister_netdevice_notifier(&mac80211_netdev_notifier);
1048}
diff --git a/net/mac80211/key.c b/net/mac80211/key.c
index 659a42d529e..8160d9c5372 100644
--- a/net/mac80211/key.c
+++ b/net/mac80211/key.c
@@ -139,7 +139,7 @@ static void ieee80211_key_enable_hw_accel(struct ieee80211_key *key)
139 struct ieee80211_sub_if_data, 139 struct ieee80211_sub_if_data,
140 u.ap); 140 u.ap);
141 141
142 ret = drv_set_key(key->local, SET_KEY, &sdata->vif, sta, &key->conf); 142 ret = drv_set_key(key->local, SET_KEY, sdata, sta, &key->conf);
143 143
144 if (!ret) { 144 if (!ret) {
145 spin_lock_bh(&todo_lock); 145 spin_lock_bh(&todo_lock);
@@ -181,7 +181,7 @@ static void ieee80211_key_disable_hw_accel(struct ieee80211_key *key)
181 struct ieee80211_sub_if_data, 181 struct ieee80211_sub_if_data,
182 u.ap); 182 u.ap);
183 183
184 ret = drv_set_key(key->local, DISABLE_KEY, &sdata->vif, 184 ret = drv_set_key(key->local, DISABLE_KEY, sdata,
185 sta, &key->conf); 185 sta, &key->conf);
186 186
187 if (ret) 187 if (ret)
@@ -421,7 +421,7 @@ void ieee80211_key_link(struct ieee80211_key *key,
421 */ 421 */
422 422
423 /* same here, the AP could be using QoS */ 423 /* same here, the AP could be using QoS */
424 ap = sta_info_get(key->local, key->sdata->u.mgd.bssid); 424 ap = sta_info_get(key->sdata, key->sdata->u.mgd.bssid);
425 if (ap) { 425 if (ap) {
426 if (test_sta_flags(ap, WLAN_STA_WME)) 426 if (test_sta_flags(ap, WLAN_STA_WME))
427 key->conf.flags |= 427 key->conf.flags |=
@@ -443,7 +443,7 @@ void ieee80211_key_link(struct ieee80211_key *key,
443 add_todo(old_key, KEY_FLAG_TODO_DELETE); 443 add_todo(old_key, KEY_FLAG_TODO_DELETE);
444 444
445 add_todo(key, KEY_FLAG_TODO_ADD_DEBUGFS); 445 add_todo(key, KEY_FLAG_TODO_ADD_DEBUGFS);
446 if (netif_running(sdata->dev)) 446 if (ieee80211_sdata_running(sdata))
447 add_todo(key, KEY_FLAG_TODO_HWACCEL_ADD); 447 add_todo(key, KEY_FLAG_TODO_HWACCEL_ADD);
448 448
449 spin_unlock_irqrestore(&sdata->local->key_lock, flags); 449 spin_unlock_irqrestore(&sdata->local->key_lock, flags);
@@ -509,7 +509,7 @@ void ieee80211_enable_keys(struct ieee80211_sub_if_data *sdata)
509{ 509{
510 ASSERT_RTNL(); 510 ASSERT_RTNL();
511 511
512 if (WARN_ON(!netif_running(sdata->dev))) 512 if (WARN_ON(!ieee80211_sdata_running(sdata)))
513 return; 513 return;
514 514
515 ieee80211_todo_for_each_key(sdata, KEY_FLAG_TODO_HWACCEL_ADD); 515 ieee80211_todo_for_each_key(sdata, KEY_FLAG_TODO_HWACCEL_ADD);
diff --git a/net/mac80211/key.h b/net/mac80211/key.h
index a49f93b79e9..bdc2968c2bb 100644
--- a/net/mac80211/key.h
+++ b/net/mac80211/key.h
@@ -59,11 +59,17 @@ enum ieee80211_internal_key_flags {
59 KEY_FLAG_TODO_DEFMGMTKEY = BIT(6), 59 KEY_FLAG_TODO_DEFMGMTKEY = BIT(6),
60}; 60};
61 61
62enum ieee80211_internal_tkip_state {
63 TKIP_STATE_NOT_INIT,
64 TKIP_STATE_PHASE1_DONE,
65 TKIP_STATE_PHASE1_HW_UPLOADED,
66};
67
62struct tkip_ctx { 68struct tkip_ctx {
63 u32 iv32; 69 u32 iv32;
64 u16 iv16; 70 u16 iv16;
65 u16 p1k[5]; 71 u16 p1k[5];
66 int initialized; 72 enum ieee80211_internal_tkip_state state;
67}; 73};
68 74
69struct ieee80211_key { 75struct ieee80211_key {
diff --git a/net/mac80211/main.c b/net/mac80211/main.c
index 0d2d94881f1..ec8f767ba95 100644
--- a/net/mac80211/main.c
+++ b/net/mac80211/main.c
@@ -17,7 +17,6 @@
17#include <linux/skbuff.h> 17#include <linux/skbuff.h>
18#include <linux/etherdevice.h> 18#include <linux/etherdevice.h>
19#include <linux/if_arp.h> 19#include <linux/if_arp.h>
20#include <linux/wireless.h>
21#include <linux/rtnetlink.h> 20#include <linux/rtnetlink.h>
22#include <linux/bitmap.h> 21#include <linux/bitmap.h>
23#include <linux/pm_qos_params.h> 22#include <linux/pm_qos_params.h>
@@ -32,7 +31,12 @@
32#include "led.h" 31#include "led.h"
33#include "cfg.h" 32#include "cfg.h"
34#include "debugfs.h" 33#include "debugfs.h"
35#include "debugfs_netdev.h" 34
35
36bool ieee80211_disable_40mhz_24ghz;
37module_param(ieee80211_disable_40mhz_24ghz, bool, 0644);
38MODULE_PARM_DESC(ieee80211_disable_40mhz_24ghz,
39 "Disable 40MHz support in the 2.4GHz band");
36 40
37void ieee80211_configure_filter(struct ieee80211_local *local) 41void ieee80211_configure_filter(struct ieee80211_local *local)
38{ 42{
@@ -102,6 +106,9 @@ int ieee80211_hw_config(struct ieee80211_local *local, u32 changed)
102 if (scan_chan) { 106 if (scan_chan) {
103 chan = scan_chan; 107 chan = scan_chan;
104 channel_type = NL80211_CHAN_NO_HT; 108 channel_type = NL80211_CHAN_NO_HT;
109 } else if (local->tmp_channel) {
110 chan = scan_chan = local->tmp_channel;
111 channel_type = local->tmp_channel_type;
105 } else { 112 } else {
106 chan = local->oper_channel; 113 chan = local->oper_channel;
107 channel_type = local->oper_channel_type; 114 channel_type = local->oper_channel_type;
@@ -114,6 +121,18 @@ int ieee80211_hw_config(struct ieee80211_local *local, u32 changed)
114 changed |= IEEE80211_CONF_CHANGE_CHANNEL; 121 changed |= IEEE80211_CONF_CHANGE_CHANNEL;
115 } 122 }
116 123
124 if (!conf_is_ht(&local->hw.conf)) {
125 /*
126 * mac80211.h documents that this is only valid
127 * when the channel is set to an HT type, and
128 * that otherwise STATIC is used.
129 */
130 local->hw.conf.smps_mode = IEEE80211_SMPS_STATIC;
131 } else if (local->hw.conf.smps_mode != local->smps_mode) {
132 local->hw.conf.smps_mode = local->smps_mode;
133 changed |= IEEE80211_CONF_CHANGE_SMPS;
134 }
135
117 if (scan_chan) 136 if (scan_chan)
118 power = chan->max_power; 137 power = chan->max_power;
119 else 138 else
@@ -173,7 +192,7 @@ void ieee80211_bss_info_change_notify(struct ieee80211_sub_if_data *sdata,
173 } else if (sdata->vif.type == NL80211_IFTYPE_ADHOC) 192 } else if (sdata->vif.type == NL80211_IFTYPE_ADHOC)
174 sdata->vif.bss_conf.bssid = sdata->u.ibss.bssid; 193 sdata->vif.bss_conf.bssid = sdata->u.ibss.bssid;
175 else if (sdata->vif.type == NL80211_IFTYPE_AP) 194 else if (sdata->vif.type == NL80211_IFTYPE_AP)
176 sdata->vif.bss_conf.bssid = sdata->dev->dev_addr; 195 sdata->vif.bss_conf.bssid = sdata->vif.addr;
177 else if (ieee80211_vif_is_mesh(&sdata->vif)) { 196 else if (ieee80211_vif_is_mesh(&sdata->vif)) {
178 sdata->vif.bss_conf.bssid = zero; 197 sdata->vif.bss_conf.bssid = zero;
179 } else { 198 } else {
@@ -195,7 +214,7 @@ void ieee80211_bss_info_change_notify(struct ieee80211_sub_if_data *sdata,
195 } 214 }
196 215
197 if (changed & BSS_CHANGED_BEACON_ENABLED) { 216 if (changed & BSS_CHANGED_BEACON_ENABLED) {
198 if (local->quiescing || !netif_running(sdata->dev) || 217 if (local->quiescing || !ieee80211_sdata_running(sdata) ||
199 test_bit(SCAN_SW_SCANNING, &local->scanning)) { 218 test_bit(SCAN_SW_SCANNING, &local->scanning)) {
200 sdata->vif.bss_conf.enable_beacon = false; 219 sdata->vif.bss_conf.enable_beacon = false;
201 } else { 220 } else {
@@ -223,8 +242,7 @@ void ieee80211_bss_info_change_notify(struct ieee80211_sub_if_data *sdata,
223 } 242 }
224 } 243 }
225 244
226 drv_bss_info_changed(local, &sdata->vif, 245 drv_bss_info_changed(local, sdata, &sdata->vif.bss_conf, changed);
227 &sdata->vif.bss_conf, changed);
228} 246}
229 247
230u32 ieee80211_reset_erp_info(struct ieee80211_sub_if_data *sdata) 248u32 ieee80211_reset_erp_info(struct ieee80211_sub_if_data *sdata)
@@ -299,6 +317,16 @@ void ieee80211_restart_hw(struct ieee80211_hw *hw)
299} 317}
300EXPORT_SYMBOL(ieee80211_restart_hw); 318EXPORT_SYMBOL(ieee80211_restart_hw);
301 319
320static void ieee80211_recalc_smps_work(struct work_struct *work)
321{
322 struct ieee80211_local *local =
323 container_of(work, struct ieee80211_local, recalc_smps);
324
325 mutex_lock(&local->iflist_mtx);
326 ieee80211_recalc_smps(local, NULL);
327 mutex_unlock(&local->iflist_mtx);
328}
329
302struct ieee80211_hw *ieee80211_alloc_hw(size_t priv_data_len, 330struct ieee80211_hw *ieee80211_alloc_hw(size_t priv_data_len,
303 const struct ieee80211_ops *ops) 331 const struct ieee80211_ops *ops)
304{ 332{
@@ -333,9 +361,7 @@ struct ieee80211_hw *ieee80211_alloc_hw(size_t priv_data_len,
333 WIPHY_FLAG_4ADDR_STATION; 361 WIPHY_FLAG_4ADDR_STATION;
334 wiphy->privid = mac80211_wiphy_privid; 362 wiphy->privid = mac80211_wiphy_privid;
335 363
336 /* Yes, putting cfg80211_bss into ieee80211_bss is a hack */ 364 wiphy->bss_priv_size = sizeof(struct ieee80211_bss);
337 wiphy->bss_priv_size = sizeof(struct ieee80211_bss) -
338 sizeof(struct cfg80211_bss);
339 365
340 local = wiphy_priv(wiphy); 366 local = wiphy_priv(wiphy);
341 367
@@ -358,6 +384,8 @@ struct ieee80211_hw *ieee80211_alloc_hw(size_t priv_data_len,
358 local->hw.conf.long_frame_max_tx_count = wiphy->retry_long; 384 local->hw.conf.long_frame_max_tx_count = wiphy->retry_long;
359 local->hw.conf.short_frame_max_tx_count = wiphy->retry_short; 385 local->hw.conf.short_frame_max_tx_count = wiphy->retry_short;
360 local->user_power_level = -1; 386 local->user_power_level = -1;
387 local->uapsd_queues = IEEE80211_DEFAULT_UAPSD_QUEUES;
388 local->uapsd_max_sp_len = IEEE80211_DEFAULT_MAX_SP_LEN;
361 389
362 INIT_LIST_HEAD(&local->interfaces); 390 INIT_LIST_HEAD(&local->interfaces);
363 mutex_init(&local->iflist_mtx); 391 mutex_init(&local->iflist_mtx);
@@ -369,9 +397,13 @@ struct ieee80211_hw *ieee80211_alloc_hw(size_t priv_data_len,
369 397
370 INIT_DELAYED_WORK(&local->scan_work, ieee80211_scan_work); 398 INIT_DELAYED_WORK(&local->scan_work, ieee80211_scan_work);
371 399
400 ieee80211_work_init(local);
401
372 INIT_WORK(&local->restart_work, ieee80211_restart_work); 402 INIT_WORK(&local->restart_work, ieee80211_restart_work);
373 403
374 INIT_WORK(&local->reconfig_filter, ieee80211_reconfig_filter); 404 INIT_WORK(&local->reconfig_filter, ieee80211_reconfig_filter);
405 INIT_WORK(&local->recalc_smps, ieee80211_recalc_smps_work);
406 local->smps_mode = IEEE80211_SMPS_OFF;
375 407
376 INIT_WORK(&local->dynamic_ps_enable_work, 408 INIT_WORK(&local->dynamic_ps_enable_work,
377 ieee80211_dynamic_ps_enable_work); 409 ieee80211_dynamic_ps_enable_work);
@@ -461,6 +493,10 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
461 else if (local->hw.flags & IEEE80211_HW_SIGNAL_UNSPEC) 493 else if (local->hw.flags & IEEE80211_HW_SIGNAL_UNSPEC)
462 local->hw.wiphy->signal_type = CFG80211_SIGNAL_TYPE_UNSPEC; 494 local->hw.wiphy->signal_type = CFG80211_SIGNAL_TYPE_UNSPEC;
463 495
496 WARN((local->hw.flags & IEEE80211_HW_SUPPORTS_UAPSD)
497 && (local->hw.flags & IEEE80211_HW_PS_NULLFUNC_STACK),
498 "U-APSD not supported with HW_PS_NULLFUNC_STACK\n");
499
464 /* 500 /*
465 * Calculate scan IE length -- we need this to alloc 501 * Calculate scan IE length -- we need this to alloc
466 * memory and to subtract from the driver limit. It 502 * memory and to subtract from the driver limit. It
@@ -674,11 +710,19 @@ static int __init ieee80211_init(void)
674 710
675 ret = rc80211_pid_init(); 711 ret = rc80211_pid_init();
676 if (ret) 712 if (ret)
677 return ret; 713 goto err_pid;
678 714
679 ieee80211_debugfs_netdev_init(); 715 ret = ieee80211_iface_init();
716 if (ret)
717 goto err_netdev;
680 718
681 return 0; 719 return 0;
720 err_netdev:
721 rc80211_pid_exit();
722 err_pid:
723 rc80211_minstrel_exit();
724
725 return ret;
682} 726}
683 727
684static void __exit ieee80211_exit(void) 728static void __exit ieee80211_exit(void)
@@ -695,7 +739,7 @@ static void __exit ieee80211_exit(void)
695 if (mesh_allocated) 739 if (mesh_allocated)
696 ieee80211s_stop(); 740 ieee80211s_stop();
697 741
698 ieee80211_debugfs_netdev_exit(); 742 ieee80211_iface_exit();
699} 743}
700 744
701 745
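main.c above also adds the ieee80211_disable_40mhz_24ghz module parameter, exported through ieee80211_i.h. The code that consumes it is outside this excerpt; the sketch below only illustrates the kind of check HT setup code can make, and the function name is hypothetical.

static void example_apply_40mhz_policy(struct ieee80211_supported_band *sband)
{
	/* When the parameter is set, strip 40 MHz capabilities from the
	 * 2.4 GHz band so no HT40 operation is attempted there. */
	if (!ieee80211_disable_40mhz_24ghz ||
	    sband->band != IEEE80211_BAND_2GHZ)
		return;

	sband->ht_cap.cap &= ~IEEE80211_HT_CAP_SUP_WIDTH_20_40;
	sband->ht_cap.cap &= ~IEEE80211_HT_CAP_SGI_40;
}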
diff --git a/net/mac80211/mesh.c b/net/mac80211/mesh.c
index 6a433142959..61080c5fad5 100644
--- a/net/mac80211/mesh.c
+++ b/net/mac80211/mesh.c
@@ -457,7 +457,7 @@ static void ieee80211_mesh_housekeeping(struct ieee80211_sub_if_data *sdata,
457 457
458#ifdef CONFIG_MAC80211_VERBOSE_DEBUG 458#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
459 printk(KERN_DEBUG "%s: running mesh housekeeping\n", 459 printk(KERN_DEBUG "%s: running mesh housekeeping\n",
460 sdata->dev->name); 460 sdata->name);
461#endif 461#endif
462 462
463 ieee80211_sta_expire(sdata, IEEE80211_MESH_PEER_INACTIVITY_LIMIT); 463 ieee80211_sta_expire(sdata, IEEE80211_MESH_PEER_INACTIVITY_LIMIT);
@@ -565,7 +565,7 @@ static void ieee80211_mesh_rx_bcn_presp(struct ieee80211_sub_if_data *sdata,
565 565
566 /* ignore ProbeResp to foreign address */ 566 /* ignore ProbeResp to foreign address */
567 if (stype == IEEE80211_STYPE_PROBE_RESP && 567 if (stype == IEEE80211_STYPE_PROBE_RESP &&
568 compare_ether_addr(mgmt->da, sdata->dev->dev_addr)) 568 compare_ether_addr(mgmt->da, sdata->vif.addr))
569 return; 569 return;
570 570
571 baselen = (u8 *) mgmt->u.probe_resp.variable - (u8 *) mgmt; 571 baselen = (u8 *) mgmt->u.probe_resp.variable - (u8 *) mgmt;
@@ -645,7 +645,7 @@ static void ieee80211_mesh_work(struct work_struct *work)
645 struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; 645 struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
646 struct sk_buff *skb; 646 struct sk_buff *skb;
647 647
648 if (!netif_running(sdata->dev)) 648 if (!ieee80211_sdata_running(sdata))
649 return; 649 return;
650 650
651 if (local->scanning) 651 if (local->scanning)
diff --git a/net/mac80211/mesh_hwmp.c b/net/mac80211/mesh_hwmp.c
index d28acb6b1f8..ce84237ebad 100644
--- a/net/mac80211/mesh_hwmp.c
+++ b/net/mac80211/mesh_hwmp.c
@@ -128,9 +128,9 @@ static int mesh_path_sel_frame_tx(enum mpath_frame_type action, u8 flags,
128 IEEE80211_STYPE_ACTION); 128 IEEE80211_STYPE_ACTION);
129 129
130 memcpy(mgmt->da, da, ETH_ALEN); 130 memcpy(mgmt->da, da, ETH_ALEN);
131 memcpy(mgmt->sa, sdata->dev->dev_addr, ETH_ALEN); 131 memcpy(mgmt->sa, sdata->vif.addr, ETH_ALEN);
132 /* BSSID == SA */ 132 /* BSSID == SA */
133 memcpy(mgmt->bssid, sdata->dev->dev_addr, ETH_ALEN); 133 memcpy(mgmt->bssid, sdata->vif.addr, ETH_ALEN);
134 mgmt->u.action.category = MESH_PATH_SEL_CATEGORY; 134 mgmt->u.action.category = MESH_PATH_SEL_CATEGORY;
135 mgmt->u.action.u.mesh_action.action_code = MESH_PATH_SEL_ACTION; 135 mgmt->u.action.u.mesh_action.action_code = MESH_PATH_SEL_ACTION;
136 136
@@ -222,7 +222,7 @@ int mesh_path_error_tx(u8 ttl, u8 *target, __le32 target_sn,
222 IEEE80211_STYPE_ACTION); 222 IEEE80211_STYPE_ACTION);
223 223
224 memcpy(mgmt->da, ra, ETH_ALEN); 224 memcpy(mgmt->da, ra, ETH_ALEN);
225 memcpy(mgmt->sa, sdata->dev->dev_addr, ETH_ALEN); 225 memcpy(mgmt->sa, sdata->vif.addr, ETH_ALEN);
226 /* BSSID is left zeroed, wildcard value */ 226 /* BSSID is left zeroed, wildcard value */
227 mgmt->u.action.category = MESH_PATH_SEL_CATEGORY; 227 mgmt->u.action.category = MESH_PATH_SEL_CATEGORY;
228 mgmt->u.action.u.mesh_action.action_code = MESH_PATH_SEL_ACTION; 228 mgmt->u.action.u.mesh_action.action_code = MESH_PATH_SEL_ACTION;
@@ -335,7 +335,7 @@ static u32 hwmp_route_info_get(struct ieee80211_sub_if_data *sdata,
335 bool process = true; 335 bool process = true;
336 336
337 rcu_read_lock(); 337 rcu_read_lock();
338 sta = sta_info_get(local, mgmt->sa); 338 sta = sta_info_get(sdata, mgmt->sa);
339 if (!sta) { 339 if (!sta) {
340 rcu_read_unlock(); 340 rcu_read_unlock();
341 return 0; 341 return 0;
@@ -374,7 +374,7 @@ static u32 hwmp_route_info_get(struct ieee80211_sub_if_data *sdata,
374 new_metric = MAX_METRIC; 374 new_metric = MAX_METRIC;
375 exp_time = TU_TO_EXP_TIME(orig_lifetime); 375 exp_time = TU_TO_EXP_TIME(orig_lifetime);
376 376
377 if (memcmp(orig_addr, sdata->dev->dev_addr, ETH_ALEN) == 0) { 377 if (memcmp(orig_addr, sdata->vif.addr, ETH_ALEN) == 0) {
378 /* This MP is the originator, we are not interested in this 378 /* This MP is the originator, we are not interested in this
379 * frame, except for updating transmitter's path info. 379 * frame, except for updating transmitter's path info.
380 */ 380 */
@@ -486,7 +486,7 @@ static void hwmp_preq_frame_process(struct ieee80211_sub_if_data *sdata,
486 486
487 mhwmp_dbg("received PREQ from %pM\n", orig_addr); 487 mhwmp_dbg("received PREQ from %pM\n", orig_addr);
488 488
489 if (memcmp(target_addr, sdata->dev->dev_addr, ETH_ALEN) == 0) { 489 if (memcmp(target_addr, sdata->vif.addr, ETH_ALEN) == 0) {
490 mhwmp_dbg("PREQ is for us\n"); 490 mhwmp_dbg("PREQ is for us\n");
491 forward = false; 491 forward = false;
492 reply = true; 492 reply = true;
@@ -579,7 +579,7 @@ static void hwmp_prep_frame_process(struct ieee80211_sub_if_data *sdata,
579 * replies 579 * replies
580 */ 580 */
581 target_addr = PREP_IE_TARGET_ADDR(prep_elem); 581 target_addr = PREP_IE_TARGET_ADDR(prep_elem);
582 if (memcmp(target_addr, sdata->dev->dev_addr, ETH_ALEN) == 0) 582 if (memcmp(target_addr, sdata->vif.addr, ETH_ALEN) == 0)
583 /* destination, no forwarding required */ 583 /* destination, no forwarding required */
584 return; 584 return;
585 585
@@ -890,7 +890,7 @@ void mesh_path_start_discovery(struct ieee80211_sub_if_data *sdata)
890 target_flags = MP_F_RF; 890 target_flags = MP_F_RF;
891 891
892 spin_unlock_bh(&mpath->state_lock); 892 spin_unlock_bh(&mpath->state_lock);
893 mesh_path_sel_frame_tx(MPATH_PREQ, 0, sdata->dev->dev_addr, 893 mesh_path_sel_frame_tx(MPATH_PREQ, 0, sdata->vif.addr,
894 cpu_to_le32(ifmsh->sn), target_flags, mpath->dst, 894 cpu_to_le32(ifmsh->sn), target_flags, mpath->dst,
895 cpu_to_le32(mpath->sn), broadcast_addr, 0, 895 cpu_to_le32(mpath->sn), broadcast_addr, 0,
896 ttl, cpu_to_le32(lifetime), 0, 896 ttl, cpu_to_le32(lifetime), 0,
@@ -939,7 +939,7 @@ int mesh_nexthop_lookup(struct sk_buff *skb,
939 if (time_after(jiffies, 939 if (time_after(jiffies,
940 mpath->exp_time - 940 mpath->exp_time -
941 msecs_to_jiffies(sdata->u.mesh.mshcfg.path_refresh_time)) && 941 msecs_to_jiffies(sdata->u.mesh.mshcfg.path_refresh_time)) &&
942 !memcmp(sdata->dev->dev_addr, hdr->addr4, ETH_ALEN) && 942 !memcmp(sdata->vif.addr, hdr->addr4, ETH_ALEN) &&
943 !(mpath->flags & MESH_PATH_RESOLVING) && 943 !(mpath->flags & MESH_PATH_RESOLVING) &&
944 !(mpath->flags & MESH_PATH_FIXED)) { 944 !(mpath->flags & MESH_PATH_FIXED)) {
945 mesh_queue_preq(mpath, 945 mesh_queue_preq(mpath,
@@ -1010,7 +1010,7 @@ mesh_path_tx_root_frame(struct ieee80211_sub_if_data *sdata)
1010{ 1010{
1011 struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; 1011 struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
1012 1012
1013 mesh_path_sel_frame_tx(MPATH_RANN, 0, sdata->dev->dev_addr, 1013 mesh_path_sel_frame_tx(MPATH_RANN, 0, sdata->vif.addr,
1014 cpu_to_le32(++ifmsh->sn), 1014 cpu_to_le32(++ifmsh->sn),
1015 0, NULL, 0, broadcast_addr, 1015 0, NULL, 0, broadcast_addr,
1016 0, MESH_TTL, 0, 0, 0, sdata); 1016 0, MESH_TTL, 0, 0, 0, sdata);
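
The mesh_hwmp.c hunks also switch sta_info_get() from taking the ieee80211_local to taking the sub-interface, so station lookups become per-interface. Below is a toy version of such a lookup, keyed by (interface, MAC) instead of MAC alone; the structures are illustrative and the real code uses a hash table rather than a linear scan.

#include <stddef.h>
#include <stdio.h>
#include <string.h>

#define ETH_ALEN 6

/* Toy station table. */
struct sta_stub {
	const void *sdata;		/* owning interface */
	unsigned char addr[ETH_ALEN];
};

static struct sta_stub stations[4];
static int n_stations;

/* Lookup keyed by (interface, MAC), mirroring the
 * sta_info_get(local, addr) -> sta_info_get(sdata, addr) change above. */
static struct sta_stub *sta_get(const void *sdata, const unsigned char *addr)
{
	for (int i = 0; i < n_stations; i++)
		if (stations[i].sdata == sdata &&
		    !memcmp(stations[i].addr, addr, ETH_ALEN))
			return &stations[i];
	return NULL;
}

int main(void)
{
	int iface_a, iface_b;
	unsigned char mac[ETH_ALEN] = { 0xaa, 0xbb, 0xcc, 0xdd, 0xee, 0xff };

	stations[n_stations++] =
		(struct sta_stub){ &iface_a, { 0xaa, 0xbb, 0xcc, 0xdd, 0xee, 0xff } };

	printf("on A: %p\n", (void *)sta_get(&iface_a, mac));	/* found */
	printf("on B: %p\n", (void *)sta_get(&iface_b, mac));	/* NULL */
	return 0;
}
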
diff --git a/net/mac80211/mesh_pathtbl.c b/net/mac80211/mesh_pathtbl.c
index 0192cfdacae..2312efe04c6 100644
--- a/net/mac80211/mesh_pathtbl.c
+++ b/net/mac80211/mesh_pathtbl.c
@@ -260,7 +260,7 @@ int mesh_path_add(u8 *dst, struct ieee80211_sub_if_data *sdata)
260 int err = 0; 260 int err = 0;
261 u32 hash_idx; 261 u32 hash_idx;
262 262
263 if (memcmp(dst, sdata->dev->dev_addr, ETH_ALEN) == 0) 263 if (memcmp(dst, sdata->vif.addr, ETH_ALEN) == 0)
264 /* never add ourselves as neighbours */ 264 /* never add ourselves as neighbours */
265 return -ENOTSUPP; 265 return -ENOTSUPP;
266 266
@@ -377,7 +377,7 @@ int mpp_path_add(u8 *dst, u8 *mpp, struct ieee80211_sub_if_data *sdata)
377 int err = 0; 377 int err = 0;
378 u32 hash_idx; 378 u32 hash_idx;
379 379
380 if (memcmp(dst, sdata->dev->dev_addr, ETH_ALEN) == 0) 380 if (memcmp(dst, sdata->vif.addr, ETH_ALEN) == 0)
381 /* never add ourselves as neighbours */ 381 /* never add ourselves as neighbours */
382 return -ENOTSUPP; 382 return -ENOTSUPP;
383 383
@@ -605,7 +605,7 @@ void mesh_path_discard_frame(struct sk_buff *skb,
605 struct mesh_path *mpath; 605 struct mesh_path *mpath;
606 u32 sn = 0; 606 u32 sn = 0;
607 607
608 if (memcmp(hdr->addr4, sdata->dev->dev_addr, ETH_ALEN) != 0) { 608 if (memcmp(hdr->addr4, sdata->vif.addr, ETH_ALEN) != 0) {
609 u8 *ra, *da; 609 u8 *ra, *da;
610 610
611 da = hdr->addr3; 611 da = hdr->addr3;
diff --git a/net/mac80211/mesh_plink.c b/net/mac80211/mesh_plink.c
index 0f7c6e6a424..7985e515089 100644
--- a/net/mac80211/mesh_plink.c
+++ b/net/mac80211/mesh_plink.c
@@ -169,7 +169,7 @@ static int mesh_plink_frame_tx(struct ieee80211_sub_if_data *sdata,
169 mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT | 169 mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
170 IEEE80211_STYPE_ACTION); 170 IEEE80211_STYPE_ACTION);
171 memcpy(mgmt->da, da, ETH_ALEN); 171 memcpy(mgmt->da, da, ETH_ALEN);
172 memcpy(mgmt->sa, sdata->dev->dev_addr, ETH_ALEN); 172 memcpy(mgmt->sa, sdata->vif.addr, ETH_ALEN);
173 /* BSSID is left zeroed, wildcard value */ 173 /* BSSID is left zeroed, wildcard value */
174 mgmt->u.action.category = MESH_PLINK_CATEGORY; 174 mgmt->u.action.category = MESH_PLINK_CATEGORY;
175 mgmt->u.action.u.plink_action.action_code = action; 175 mgmt->u.action.u.plink_action.action_code = action;
@@ -234,7 +234,7 @@ void mesh_neighbour_update(u8 *hw_addr, u32 rates, struct ieee80211_sub_if_data
234 234
235 rcu_read_lock(); 235 rcu_read_lock();
236 236
237 sta = sta_info_get(local, hw_addr); 237 sta = sta_info_get(sdata, hw_addr);
238 if (!sta) { 238 if (!sta) {
239 sta = mesh_plink_alloc(sdata, hw_addr, rates); 239 sta = mesh_plink_alloc(sdata, hw_addr, rates);
240 if (!sta) { 240 if (!sta) {
@@ -455,7 +455,7 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_m
455 455
456 rcu_read_lock(); 456 rcu_read_lock();
457 457
458 sta = sta_info_get(local, mgmt->sa); 458 sta = sta_info_get(sdata, mgmt->sa);
459 if (!sta && ftype != PLINK_OPEN) { 459 if (!sta && ftype != PLINK_OPEN) {
460 mpl_dbg("Mesh plink: cls or cnf from unknown peer\n"); 460 mpl_dbg("Mesh plink: cls or cnf from unknown peer\n");
461 rcu_read_unlock(); 461 rcu_read_unlock();
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index 05a18f43e1b..1e1d16c55ee 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -75,11 +75,8 @@ enum rx_mgmt_action {
75 /* caller must call cfg80211_send_disassoc() */ 75 /* caller must call cfg80211_send_disassoc() */
76 RX_MGMT_CFG80211_DISASSOC, 76 RX_MGMT_CFG80211_DISASSOC,
77 77
78 /* caller must call cfg80211_auth_timeout() & free work */ 78 /* caller must tell cfg80211 about internal error */
79 RX_MGMT_CFG80211_AUTH_TO, 79 RX_MGMT_CFG80211_ASSOC_ERROR,
80
81 /* caller must call cfg80211_assoc_timeout() & free work */
82 RX_MGMT_CFG80211_ASSOC_TO,
83}; 80};
84 81
85/* utils */ 82/* utils */
@@ -122,27 +119,6 @@ static int ecw2cw(int ecw)
122 return (1 << ecw) - 1; 119 return (1 << ecw) - 1;
123} 120}
124 121
125static int ieee80211_compatible_rates(struct ieee80211_bss *bss,
126 struct ieee80211_supported_band *sband,
127 u32 *rates)
128{
129 int i, j, count;
130 *rates = 0;
131 count = 0;
132 for (i = 0; i < bss->supp_rates_len; i++) {
133 int rate = (bss->supp_rates[i] & 0x7F) * 5;
134
135 for (j = 0; j < sband->n_bitrates; j++)
136 if (sband->bitrates[j].bitrate == rate) {
137 *rates |= BIT(j);
138 count++;
139 break;
140 }
141 }
142
143 return count;
144}
145
146/* 122/*
147 * ieee80211_enable_ht should be called only after the operating band 123 * ieee80211_enable_ht should be called only after the operating band
148 * has been determined as ht configuration depends on the hw's 124 * has been determined as ht configuration depends on the hw's
@@ -202,7 +178,7 @@ static u32 ieee80211_enable_ht(struct ieee80211_sub_if_data *sdata,
202 ieee80211_hw_config(local, 0); 178 ieee80211_hw_config(local, 0);
203 179
204 rcu_read_lock(); 180 rcu_read_lock();
205 sta = sta_info_get(local, bssid); 181 sta = sta_info_get(sdata, bssid);
206 if (sta) 182 if (sta)
207 rate_control_rate_update(local, sband, sta, 183 rate_control_rate_update(local, sband, sta,
208 IEEE80211_RC_HT_CHANGED); 184 IEEE80211_RC_HT_CHANGED);
@@ -228,209 +204,6 @@ static u32 ieee80211_enable_ht(struct ieee80211_sub_if_data *sdata,
228 204
229/* frame sending functions */ 205/* frame sending functions */
230 206
231static void ieee80211_send_assoc(struct ieee80211_sub_if_data *sdata,
232 struct ieee80211_mgd_work *wk)
233{
234 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
235 struct ieee80211_local *local = sdata->local;
236 struct sk_buff *skb;
237 struct ieee80211_mgmt *mgmt;
238 u8 *pos;
239 const u8 *ies, *ht_ie;
240 int i, len, count, rates_len, supp_rates_len;
241 u16 capab;
242 int wmm = 0;
243 struct ieee80211_supported_band *sband;
244 u32 rates = 0;
245
246 skb = dev_alloc_skb(local->hw.extra_tx_headroom +
247 sizeof(*mgmt) + 200 + wk->ie_len +
248 wk->ssid_len);
249 if (!skb) {
250 printk(KERN_DEBUG "%s: failed to allocate buffer for assoc "
251 "frame\n", sdata->dev->name);
252 return;
253 }
254 skb_reserve(skb, local->hw.extra_tx_headroom);
255
256 sband = local->hw.wiphy->bands[local->hw.conf.channel->band];
257
258 capab = ifmgd->capab;
259
260 if (local->hw.conf.channel->band == IEEE80211_BAND_2GHZ) {
261 if (!(local->hw.flags & IEEE80211_HW_2GHZ_SHORT_SLOT_INCAPABLE))
262 capab |= WLAN_CAPABILITY_SHORT_SLOT_TIME;
263 if (!(local->hw.flags & IEEE80211_HW_2GHZ_SHORT_PREAMBLE_INCAPABLE))
264 capab |= WLAN_CAPABILITY_SHORT_PREAMBLE;
265 }
266
267 if (wk->bss->cbss.capability & WLAN_CAPABILITY_PRIVACY)
268 capab |= WLAN_CAPABILITY_PRIVACY;
269 if (wk->bss->wmm_used)
270 wmm = 1;
271
272 /* get all rates supported by the device and the AP as
273 * some APs don't like getting a superset of their rates
274 * in the association request (e.g. D-Link DAP 1353 in
275 * b-only mode) */
276 rates_len = ieee80211_compatible_rates(wk->bss, sband, &rates);
277
278 if ((wk->bss->cbss.capability & WLAN_CAPABILITY_SPECTRUM_MGMT) &&
279 (local->hw.flags & IEEE80211_HW_SPECTRUM_MGMT))
280 capab |= WLAN_CAPABILITY_SPECTRUM_MGMT;
281
282 mgmt = (struct ieee80211_mgmt *) skb_put(skb, 24);
283 memset(mgmt, 0, 24);
284 memcpy(mgmt->da, wk->bss->cbss.bssid, ETH_ALEN);
285 memcpy(mgmt->sa, sdata->dev->dev_addr, ETH_ALEN);
286 memcpy(mgmt->bssid, wk->bss->cbss.bssid, ETH_ALEN);
287
288 if (!is_zero_ether_addr(wk->prev_bssid)) {
289 skb_put(skb, 10);
290 mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
291 IEEE80211_STYPE_REASSOC_REQ);
292 mgmt->u.reassoc_req.capab_info = cpu_to_le16(capab);
293 mgmt->u.reassoc_req.listen_interval =
294 cpu_to_le16(local->hw.conf.listen_interval);
295 memcpy(mgmt->u.reassoc_req.current_ap, wk->prev_bssid,
296 ETH_ALEN);
297 } else {
298 skb_put(skb, 4);
299 mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
300 IEEE80211_STYPE_ASSOC_REQ);
301 mgmt->u.assoc_req.capab_info = cpu_to_le16(capab);
302 mgmt->u.assoc_req.listen_interval =
303 cpu_to_le16(local->hw.conf.listen_interval);
304 }
305
306 /* SSID */
307 ies = pos = skb_put(skb, 2 + wk->ssid_len);
308 *pos++ = WLAN_EID_SSID;
309 *pos++ = wk->ssid_len;
310 memcpy(pos, wk->ssid, wk->ssid_len);
311
312 /* add all rates which were marked to be used above */
313 supp_rates_len = rates_len;
314 if (supp_rates_len > 8)
315 supp_rates_len = 8;
316
317 len = sband->n_bitrates;
318 pos = skb_put(skb, supp_rates_len + 2);
319 *pos++ = WLAN_EID_SUPP_RATES;
320 *pos++ = supp_rates_len;
321
322 count = 0;
323 for (i = 0; i < sband->n_bitrates; i++) {
324 if (BIT(i) & rates) {
325 int rate = sband->bitrates[i].bitrate;
326 *pos++ = (u8) (rate / 5);
327 if (++count == 8)
328 break;
329 }
330 }
331
332 if (rates_len > count) {
333 pos = skb_put(skb, rates_len - count + 2);
334 *pos++ = WLAN_EID_EXT_SUPP_RATES;
335 *pos++ = rates_len - count;
336
337 for (i++; i < sband->n_bitrates; i++) {
338 if (BIT(i) & rates) {
339 int rate = sband->bitrates[i].bitrate;
340 *pos++ = (u8) (rate / 5);
341 }
342 }
343 }
344
345 if (capab & WLAN_CAPABILITY_SPECTRUM_MGMT) {
346 /* 1. power capabilities */
347 pos = skb_put(skb, 4);
348 *pos++ = WLAN_EID_PWR_CAPABILITY;
349 *pos++ = 2;
350 *pos++ = 0; /* min tx power */
351 *pos++ = local->hw.conf.channel->max_power; /* max tx power */
352
353 /* 2. supported channels */
354 /* TODO: get this in reg domain format */
355 pos = skb_put(skb, 2 * sband->n_channels + 2);
356 *pos++ = WLAN_EID_SUPPORTED_CHANNELS;
357 *pos++ = 2 * sband->n_channels;
358 for (i = 0; i < sband->n_channels; i++) {
359 *pos++ = ieee80211_frequency_to_channel(
360 sband->channels[i].center_freq);
361 *pos++ = 1; /* one channel in the subband*/
362 }
363 }
364
365 if (wk->ie_len && wk->ie) {
366 pos = skb_put(skb, wk->ie_len);
367 memcpy(pos, wk->ie, wk->ie_len);
368 }
369
370 if (wmm && (ifmgd->flags & IEEE80211_STA_WMM_ENABLED)) {
371 pos = skb_put(skb, 9);
372 *pos++ = WLAN_EID_VENDOR_SPECIFIC;
373 *pos++ = 7; /* len */
374 *pos++ = 0x00; /* Microsoft OUI 00:50:F2 */
375 *pos++ = 0x50;
376 *pos++ = 0xf2;
377 *pos++ = 2; /* WME */
378 *pos++ = 0; /* WME info */
379 *pos++ = 1; /* WME ver */
380 *pos++ = 0;
381 }
382
383 /* wmm support is a must to HT */
384 /*
385 * IEEE802.11n does not allow TKIP/WEP as pairwise
386 * ciphers in HT mode. We still associate in non-ht
387 * mode (11a/b/g) if any one of these ciphers is
388 * configured as pairwise.
389 */
390 if (wmm && (ifmgd->flags & IEEE80211_STA_WMM_ENABLED) &&
391 sband->ht_cap.ht_supported &&
392 (ht_ie = ieee80211_bss_get_ie(&wk->bss->cbss, WLAN_EID_HT_INFORMATION)) &&
393 ht_ie[1] >= sizeof(struct ieee80211_ht_info) &&
394 (!(ifmgd->flags & IEEE80211_STA_DISABLE_11N))) {
395 struct ieee80211_ht_info *ht_info =
396 (struct ieee80211_ht_info *)(ht_ie + 2);
397 u16 cap = sband->ht_cap.cap;
398 __le16 tmp;
399 u32 flags = local->hw.conf.channel->flags;
400
401 switch (ht_info->ht_param & IEEE80211_HT_PARAM_CHA_SEC_OFFSET) {
402 case IEEE80211_HT_PARAM_CHA_SEC_ABOVE:
403 if (flags & IEEE80211_CHAN_NO_HT40PLUS) {
404 cap &= ~IEEE80211_HT_CAP_SUP_WIDTH_20_40;
405 cap &= ~IEEE80211_HT_CAP_SGI_40;
406 }
407 break;
408 case IEEE80211_HT_PARAM_CHA_SEC_BELOW:
409 if (flags & IEEE80211_CHAN_NO_HT40MINUS) {
410 cap &= ~IEEE80211_HT_CAP_SUP_WIDTH_20_40;
411 cap &= ~IEEE80211_HT_CAP_SGI_40;
412 }
413 break;
414 }
415
416 tmp = cpu_to_le16(cap);
417 pos = skb_put(skb, sizeof(struct ieee80211_ht_cap)+2);
418 *pos++ = WLAN_EID_HT_CAPABILITY;
419 *pos++ = sizeof(struct ieee80211_ht_cap);
420 memset(pos, 0, sizeof(struct ieee80211_ht_cap));
421 memcpy(pos, &tmp, sizeof(u16));
422 pos += sizeof(u16);
423 /* TODO: needs a define here for << 2 */
424 *pos++ = sband->ht_cap.ampdu_factor |
425 (sband->ht_cap.ampdu_density << 2);
426 memcpy(pos, &sband->ht_cap.mcs, sizeof(sband->ht_cap.mcs));
427 }
428
429 IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT;
430 ieee80211_tx_skb(sdata, skb);
431}
432
433
434static void ieee80211_send_deauth_disassoc(struct ieee80211_sub_if_data *sdata, 207static void ieee80211_send_deauth_disassoc(struct ieee80211_sub_if_data *sdata,
435 const u8 *bssid, u16 stype, u16 reason, 208 const u8 *bssid, u16 stype, u16 reason,
436 void *cookie) 209 void *cookie)
@@ -443,7 +216,7 @@ static void ieee80211_send_deauth_disassoc(struct ieee80211_sub_if_data *sdata,
443 skb = dev_alloc_skb(local->hw.extra_tx_headroom + sizeof(*mgmt)); 216 skb = dev_alloc_skb(local->hw.extra_tx_headroom + sizeof(*mgmt));
444 if (!skb) { 217 if (!skb) {
445 printk(KERN_DEBUG "%s: failed to allocate buffer for " 218 printk(KERN_DEBUG "%s: failed to allocate buffer for "
446 "deauth/disassoc frame\n", sdata->dev->name); 219 "deauth/disassoc frame\n", sdata->name);
447 return; 220 return;
448 } 221 }
449 skb_reserve(skb, local->hw.extra_tx_headroom); 222 skb_reserve(skb, local->hw.extra_tx_headroom);
@@ -451,7 +224,7 @@ static void ieee80211_send_deauth_disassoc(struct ieee80211_sub_if_data *sdata,
451 mgmt = (struct ieee80211_mgmt *) skb_put(skb, 24); 224 mgmt = (struct ieee80211_mgmt *) skb_put(skb, 24);
452 memset(mgmt, 0, 24); 225 memset(mgmt, 0, 24);
453 memcpy(mgmt->da, bssid, ETH_ALEN); 226 memcpy(mgmt->da, bssid, ETH_ALEN);
454 memcpy(mgmt->sa, sdata->dev->dev_addr, ETH_ALEN); 227 memcpy(mgmt->sa, sdata->vif.addr, ETH_ALEN);
455 memcpy(mgmt->bssid, bssid, ETH_ALEN); 228 memcpy(mgmt->bssid, bssid, ETH_ALEN);
456 mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT | stype); 229 mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT | stype);
457 skb_put(skb, 2); 230 skb_put(skb, 2);
@@ -476,30 +249,15 @@ static void ieee80211_send_deauth_disassoc(struct ieee80211_sub_if_data *sdata,
476void ieee80211_send_pspoll(struct ieee80211_local *local, 249void ieee80211_send_pspoll(struct ieee80211_local *local,
477 struct ieee80211_sub_if_data *sdata) 250 struct ieee80211_sub_if_data *sdata)
478{ 251{
479 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
480 struct ieee80211_pspoll *pspoll; 252 struct ieee80211_pspoll *pspoll;
481 struct sk_buff *skb; 253 struct sk_buff *skb;
482 u16 fc;
483 254
484 skb = dev_alloc_skb(local->hw.extra_tx_headroom + sizeof(*pspoll)); 255 skb = ieee80211_pspoll_get(&local->hw, &sdata->vif);
485 if (!skb) { 256 if (!skb)
486 printk(KERN_DEBUG "%s: failed to allocate buffer for "
487 "pspoll frame\n", sdata->dev->name);
488 return; 257 return;
489 }
490 skb_reserve(skb, local->hw.extra_tx_headroom);
491 258
492 pspoll = (struct ieee80211_pspoll *) skb_put(skb, sizeof(*pspoll)); 259 pspoll = (struct ieee80211_pspoll *) skb->data;
493 memset(pspoll, 0, sizeof(*pspoll)); 260 pspoll->frame_control |= cpu_to_le16(IEEE80211_FCTL_PM);
494 fc = IEEE80211_FTYPE_CTL | IEEE80211_STYPE_PSPOLL | IEEE80211_FCTL_PM;
495 pspoll->frame_control = cpu_to_le16(fc);
496 pspoll->aid = cpu_to_le16(ifmgd->aid);
497
498 /* aid in PS-Poll has its two MSBs each set to 1 */
499 pspoll->aid |= cpu_to_le16(1 << 15 | 1 << 14);
500
501 memcpy(pspoll->bssid, ifmgd->bssid, ETH_ALEN);
502 memcpy(pspoll->ta, sdata->dev->dev_addr, ETH_ALEN);
503 261
504 IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT; 262 IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT;
505 ieee80211_tx_skb(sdata, skb); 263 ieee80211_tx_skb(sdata, skb);
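
The removed open-coded PS-Poll builder above (now replaced by ieee80211_pspoll_get()) shows the encoding the frame needs: the association ID goes into the duration/ID field with bits 15 and 14 forced to 1. A small standalone sketch of just that encoding:

#include <stdint.h>
#include <stdio.h>

/* AID field of a PS-Poll: association ID with the two MSBs set,
 * as in the removed "aid |= 1 << 15 | 1 << 14" lines above. */
static uint16_t pspoll_aid_field(uint16_t aid)
{
	return (uint16_t)(aid | (1u << 15) | (1u << 14));
}

int main(void)
{
	printf("aid 1  -> 0x%04x\n", pspoll_aid_field(1));	/* 0xc001 */
	printf("aid 42 -> 0x%04x\n", pspoll_aid_field(42));	/* 0xc02a */
	return 0;
}
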
@@ -510,30 +268,47 @@ void ieee80211_send_nullfunc(struct ieee80211_local *local,
510 int powersave) 268 int powersave)
511{ 269{
512 struct sk_buff *skb; 270 struct sk_buff *skb;
271 struct ieee80211_hdr_3addr *nullfunc;
272
273 skb = ieee80211_nullfunc_get(&local->hw, &sdata->vif);
274 if (!skb)
275 return;
276
277 nullfunc = (struct ieee80211_hdr_3addr *) skb->data;
278 if (powersave)
279 nullfunc->frame_control |= cpu_to_le16(IEEE80211_FCTL_PM);
280
281 IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT;
282 ieee80211_tx_skb(sdata, skb);
283}
284
285static void ieee80211_send_4addr_nullfunc(struct ieee80211_local *local,
286 struct ieee80211_sub_if_data *sdata)
287{
288 struct sk_buff *skb;
513 struct ieee80211_hdr *nullfunc; 289 struct ieee80211_hdr *nullfunc;
514 __le16 fc; 290 __le16 fc;
515 291
516 if (WARN_ON(sdata->vif.type != NL80211_IFTYPE_STATION)) 292 if (WARN_ON(sdata->vif.type != NL80211_IFTYPE_STATION))
517 return; 293 return;
518 294
519 skb = dev_alloc_skb(local->hw.extra_tx_headroom + 24); 295 skb = dev_alloc_skb(local->hw.extra_tx_headroom + 30);
520 if (!skb) { 296 if (!skb) {
521 printk(KERN_DEBUG "%s: failed to allocate buffer for nullfunc " 297 printk(KERN_DEBUG "%s: failed to allocate buffer for 4addr "
522 "frame\n", sdata->dev->name); 298 "nullfunc frame\n", sdata->name);
523 return; 299 return;
524 } 300 }
525 skb_reserve(skb, local->hw.extra_tx_headroom); 301 skb_reserve(skb, local->hw.extra_tx_headroom);
526 302
527 nullfunc = (struct ieee80211_hdr *) skb_put(skb, 24); 303 nullfunc = (struct ieee80211_hdr *) skb_put(skb, 30);
528 memset(nullfunc, 0, 24); 304 memset(nullfunc, 0, 30);
529 fc = cpu_to_le16(IEEE80211_FTYPE_DATA | IEEE80211_STYPE_NULLFUNC | 305 fc = cpu_to_le16(IEEE80211_FTYPE_DATA | IEEE80211_STYPE_NULLFUNC |
530 IEEE80211_FCTL_TODS); 306 IEEE80211_FCTL_FROMDS | IEEE80211_FCTL_TODS);
531 if (powersave)
532 fc |= cpu_to_le16(IEEE80211_FCTL_PM);
533 nullfunc->frame_control = fc; 307 nullfunc->frame_control = fc;
534 memcpy(nullfunc->addr1, sdata->u.mgd.bssid, ETH_ALEN); 308 memcpy(nullfunc->addr1, sdata->u.mgd.bssid, ETH_ALEN);
535 memcpy(nullfunc->addr2, sdata->dev->dev_addr, ETH_ALEN); 309 memcpy(nullfunc->addr2, sdata->vif.addr, ETH_ALEN);
536 memcpy(nullfunc->addr3, sdata->u.mgd.bssid, ETH_ALEN); 310 memcpy(nullfunc->addr3, sdata->u.mgd.bssid, ETH_ALEN);
311 memcpy(nullfunc->addr4, sdata->vif.addr, ETH_ALEN);
537 312
538 IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT; 313 IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT;
539 ieee80211_tx_skb(sdata, skb); 314 ieee80211_tx_skb(sdata, skb);
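
The new ieee80211_send_4addr_nullfunc() above selects a 4-address data frame by setting both ToDS and FromDS, which is why it allocates and zeroes a 30-byte header (room for addr4) instead of 24. A standalone sketch of that frame-control arithmetic; the FC_* values mirror the IEEE 802.11 bit layout and are defined locally rather than pulled from kernel headers.

#include <stdint.h>
#include <stdio.h>

#define FC_FTYPE_DATA		0x0008
#define FC_STYPE_NULLFUNC	0x0040
#define FC_TODS			0x0100
#define FC_FROMDS		0x0200

int main(void)
{
	/* Both ToDS and FromDS set -> 4-address frame -> 30-byte header. */
	uint16_t fc = FC_FTYPE_DATA | FC_STYPE_NULLFUNC | FC_TODS | FC_FROMDS;
	unsigned hdrlen = ((fc & FC_TODS) && (fc & FC_FROMDS)) ? 30 : 24;

	printf("frame_control=0x%04x hdrlen=%u\n", fc, hdrlen);
	return 0;
}
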
@@ -546,7 +321,7 @@ static void ieee80211_chswitch_work(struct work_struct *work)
546 container_of(work, struct ieee80211_sub_if_data, u.mgd.chswitch_work); 321 container_of(work, struct ieee80211_sub_if_data, u.mgd.chswitch_work);
547 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; 322 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
548 323
549 if (!netif_running(sdata->dev)) 324 if (!ieee80211_sdata_running(sdata))
550 return; 325 return;
551 326
552 mutex_lock(&ifmgd->mtx); 327 mutex_lock(&ifmgd->mtx);
@@ -557,7 +332,7 @@ static void ieee80211_chswitch_work(struct work_struct *work)
557 ieee80211_hw_config(sdata->local, IEEE80211_CONF_CHANGE_CHANNEL); 332 ieee80211_hw_config(sdata->local, IEEE80211_CONF_CHANGE_CHANNEL);
558 333
559 /* XXX: shouldn't really modify cfg80211-owned data! */ 334 /* XXX: shouldn't really modify cfg80211-owned data! */
560 ifmgd->associated->cbss.channel = sdata->local->oper_channel; 335 ifmgd->associated->channel = sdata->local->oper_channel;
561 336
562 ieee80211_wake_queues_by_reason(&sdata->local->hw, 337 ieee80211_wake_queues_by_reason(&sdata->local->hw,
563 IEEE80211_QUEUE_STOP_REASON_CSA); 338 IEEE80211_QUEUE_STOP_REASON_CSA);
@@ -584,6 +359,8 @@ void ieee80211_sta_process_chanswitch(struct ieee80211_sub_if_data *sdata,
584 struct ieee80211_channel_sw_ie *sw_elem, 359 struct ieee80211_channel_sw_ie *sw_elem,
585 struct ieee80211_bss *bss) 360 struct ieee80211_bss *bss)
586{ 361{
362 struct cfg80211_bss *cbss =
363 container_of((void *)bss, struct cfg80211_bss, priv);
587 struct ieee80211_channel *new_ch; 364 struct ieee80211_channel *new_ch;
588 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; 365 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
589 int new_freq = ieee80211_channel_to_frequency(sw_elem->new_ch_num); 366 int new_freq = ieee80211_channel_to_frequency(sw_elem->new_ch_num);
@@ -617,7 +394,7 @@ void ieee80211_sta_process_chanswitch(struct ieee80211_sub_if_data *sdata,
617 mod_timer(&ifmgd->chswitch_timer, 394 mod_timer(&ifmgd->chswitch_timer,
618 jiffies + 395 jiffies +
619 msecs_to_jiffies(sw_elem->count * 396 msecs_to_jiffies(sw_elem->count *
620 bss->cbss.beacon_interval)); 397 cbss->beacon_interval));
621 } 398 }
622} 399}
623 400
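
The channel-switch hunk above recovers the enclosing cfg80211_bss from the driver-private ieee80211_bss pointer with container_of(). The sketch below uses stand-in types to show how container_of() walks back from a member to its containing structure; only the macro itself matches the kernel definition.

#include <stddef.h>
#include <stdio.h>

/* Minimal container_of, as in the kernel. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

/* Illustrative: an outer descriptor with private data appended, and
 * code that is only handed a pointer to the private part. */
struct outer_bss {
	int beacon_interval;
	char priv[16];
};

int main(void)
{
	struct outer_bss bss = { .beacon_interval = 100 };
	void *priv = bss.priv;		/* what the callee receives */

	/* Recover the enclosing structure, mirroring the added
	 * container_of((void *)bss, struct cfg80211_bss, priv) line. */
	struct outer_bss *back = container_of(priv, struct outer_bss, priv);
	printf("beacon_interval=%d\n", back->beacon_interval);
	return 0;
}
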
@@ -691,8 +468,13 @@ void ieee80211_recalc_ps(struct ieee80211_local *local, s32 latency)
691 return; 468 return;
692 } 469 }
693 470
471 if (!list_empty(&local->work_list)) {
472 local->ps_sdata = NULL;
473 goto change;
474 }
475
694 list_for_each_entry(sdata, &local->interfaces, list) { 476 list_for_each_entry(sdata, &local->interfaces, list) {
695 if (!netif_running(sdata->dev)) 477 if (!ieee80211_sdata_running(sdata))
696 continue; 478 continue;
697 if (sdata->vif.type != NL80211_IFTYPE_STATION) 479 if (sdata->vif.type != NL80211_IFTYPE_STATION)
698 continue; 480 continue;
@@ -701,7 +483,7 @@ void ieee80211_recalc_ps(struct ieee80211_local *local, s32 latency)
701 } 483 }
702 484
703 if (count == 1 && found->u.mgd.powersave && 485 if (count == 1 && found->u.mgd.powersave &&
704 found->u.mgd.associated && list_empty(&found->u.mgd.work_list) && 486 found->u.mgd.associated &&
705 !(found->u.mgd.flags & (IEEE80211_STA_BEACON_POLL | 487 !(found->u.mgd.flags & (IEEE80211_STA_BEACON_POLL |
706 IEEE80211_STA_CONNECTION_POLL))) { 488 IEEE80211_STA_CONNECTION_POLL))) {
707 s32 beaconint_us; 489 s32 beaconint_us;
@@ -729,6 +511,7 @@ void ieee80211_recalc_ps(struct ieee80211_local *local, s32 latency)
729 local->ps_sdata = NULL; 511 local->ps_sdata = NULL;
730 } 512 }
731 513
514 change:
732 ieee80211_change_ps(local); 515 ieee80211_change_ps(local);
733} 516}
734 517
@@ -786,9 +569,9 @@ static void ieee80211_sta_wmm_params(struct ieee80211_local *local,
786 struct ieee80211_tx_queue_params params; 569 struct ieee80211_tx_queue_params params;
787 size_t left; 570 size_t left;
788 int count; 571 int count;
789 u8 *pos; 572 u8 *pos, uapsd_queues = 0;
790 573
791 if (!(ifmgd->flags & IEEE80211_STA_WMM_ENABLED)) 574 if (local->hw.queues < 4)
792 return; 575 return;
793 576
794 if (!wmm_param) 577 if (!wmm_param)
@@ -796,6 +579,10 @@ static void ieee80211_sta_wmm_params(struct ieee80211_local *local,
796 579
797 if (wmm_param_len < 8 || wmm_param[5] /* version */ != 1) 580 if (wmm_param_len < 8 || wmm_param[5] /* version */ != 1)
798 return; 581 return;
582
583 if (ifmgd->flags & IEEE80211_STA_UAPSD_ENABLED)
584 uapsd_queues = local->uapsd_queues;
585
799 count = wmm_param[6] & 0x0f; 586 count = wmm_param[6] & 0x0f;
800 if (count == ifmgd->wmm_last_param_set) 587 if (count == ifmgd->wmm_last_param_set)
801 return; 588 return;
@@ -810,6 +597,7 @@ static void ieee80211_sta_wmm_params(struct ieee80211_local *local,
810 for (; left >= 4; left -= 4, pos += 4) { 597 for (; left >= 4; left -= 4, pos += 4) {
811 int aci = (pos[0] >> 5) & 0x03; 598 int aci = (pos[0] >> 5) & 0x03;
812 int acm = (pos[0] >> 4) & 0x01; 599 int acm = (pos[0] >> 4) & 0x01;
600 bool uapsd = false;
813 int queue; 601 int queue;
814 602
815 switch (aci) { 603 switch (aci) {
@@ -817,22 +605,30 @@ static void ieee80211_sta_wmm_params(struct ieee80211_local *local,
817 queue = 3; 605 queue = 3;
818 if (acm) 606 if (acm)
819 local->wmm_acm |= BIT(1) | BIT(2); /* BK/- */ 607 local->wmm_acm |= BIT(1) | BIT(2); /* BK/- */
608 if (uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BK)
609 uapsd = true;
820 break; 610 break;
821 case 2: /* AC_VI */ 611 case 2: /* AC_VI */
822 queue = 1; 612 queue = 1;
823 if (acm) 613 if (acm)
824 local->wmm_acm |= BIT(4) | BIT(5); /* CL/VI */ 614 local->wmm_acm |= BIT(4) | BIT(5); /* CL/VI */
615 if (uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VI)
616 uapsd = true;
825 break; 617 break;
826 case 3: /* AC_VO */ 618 case 3: /* AC_VO */
827 queue = 0; 619 queue = 0;
828 if (acm) 620 if (acm)
829 local->wmm_acm |= BIT(6) | BIT(7); /* VO/NC */ 621 local->wmm_acm |= BIT(6) | BIT(7); /* VO/NC */
622 if (uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VO)
623 uapsd = true;
830 break; 624 break;
831 case 0: /* AC_BE */ 625 case 0: /* AC_BE */
832 default: 626 default:
833 queue = 2; 627 queue = 2;
834 if (acm) 628 if (acm)
835 local->wmm_acm |= BIT(0) | BIT(3); /* BE/EE */ 629 local->wmm_acm |= BIT(0) | BIT(3); /* BE/EE */
630 if (uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BE)
631 uapsd = true;
836 break; 632 break;
837 } 633 }
838 634
@@ -840,11 +636,14 @@ static void ieee80211_sta_wmm_params(struct ieee80211_local *local,
840 params.cw_max = ecw2cw((pos[1] & 0xf0) >> 4); 636 params.cw_max = ecw2cw((pos[1] & 0xf0) >> 4);
841 params.cw_min = ecw2cw(pos[1] & 0x0f); 637 params.cw_min = ecw2cw(pos[1] & 0x0f);
842 params.txop = get_unaligned_le16(pos + 2); 638 params.txop = get_unaligned_le16(pos + 2);
639 params.uapsd = uapsd;
640
843#ifdef CONFIG_MAC80211_VERBOSE_DEBUG 641#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
844 printk(KERN_DEBUG "%s: WMM queue=%d aci=%d acm=%d aifs=%d " 642 printk(KERN_DEBUG "%s: WMM queue=%d aci=%d acm=%d aifs=%d "
845 "cWmin=%d cWmax=%d txop=%d\n", 643 "cWmin=%d cWmax=%d txop=%d uapsd=%d\n",
846 wiphy_name(local->hw.wiphy), queue, aci, acm, 644 wiphy_name(local->hw.wiphy), queue, aci, acm,
847 params.aifs, params.cw_min, params.cw_max, params.txop); 645 params.aifs, params.cw_min, params.cw_max, params.txop,
646 params.uapsd);
848#endif 647#endif
849 if (drv_conf_tx(local, queue, &params) && local->ops->conf_tx) 648 if (drv_conf_tx(local, queue, &params) && local->ops->conf_tx)
850 printk(KERN_DEBUG "%s: failed to set TX queue " 649 printk(KERN_DEBUG "%s: failed to set TX queue "
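
The WMM hunks above map each ACI value to a hardware queue and, new in this change, flag the access category for U-APSD when the corresponding bit is set in the negotiated queue mask. A compact sketch of that mapping; the AC_* bit values are illustrative stand-ins for the IEEE80211_WMM_IE_STA_QOSINFO_AC_* flags.

#include <stdbool.h>
#include <stdio.h>

#define AC_VO 0x01
#define AC_VI 0x02
#define AC_BK 0x04
#define AC_BE 0x08

/* Map a WMM ACI (0..3) to a queue and report whether U-APSD applies,
 * mirroring the switch in ieee80211_sta_wmm_params(). */
static int aci_to_queue(int aci, unsigned uapsd_queues, bool *uapsd)
{
	switch (aci) {
	case 1:  *uapsd = uapsd_queues & AC_BK; return 3;	/* AC_BK */
	case 2:  *uapsd = uapsd_queues & AC_VI; return 1;	/* AC_VI */
	case 3:  *uapsd = uapsd_queues & AC_VO; return 0;	/* AC_VO */
	default: *uapsd = uapsd_queues & AC_BE; return 2;	/* AC_BE */
	}
}

int main(void)
{
	for (int aci = 0; aci < 4; aci++) {
		bool uapsd;
		int q = aci_to_queue(aci, AC_VO | AC_VI, &uapsd);
		printf("aci=%d -> queue=%d uapsd=%d\n", aci, q, uapsd);
	}
	return 0;
}
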
@@ -871,6 +670,8 @@ static u32 ieee80211_handle_bss_capability(struct ieee80211_sub_if_data *sdata,
871 } 670 }
872 671
873 use_short_slot = !!(capab & WLAN_CAPABILITY_SHORT_SLOT_TIME); 672 use_short_slot = !!(capab & WLAN_CAPABILITY_SHORT_SLOT_TIME);
673 if (sdata->local->hw.conf.channel->band == IEEE80211_BAND_5GHZ)
674 use_short_slot = true;
874 675
875 if (use_protection != bss_conf->use_cts_prot) { 676 if (use_protection != bss_conf->use_cts_prot) {
876 bss_conf->use_cts_prot = use_protection; 677 bss_conf->use_cts_prot = use_protection;
@@ -891,25 +692,24 @@ static u32 ieee80211_handle_bss_capability(struct ieee80211_sub_if_data *sdata,
891} 692}
892 693
893static void ieee80211_set_associated(struct ieee80211_sub_if_data *sdata, 694static void ieee80211_set_associated(struct ieee80211_sub_if_data *sdata,
894 struct ieee80211_mgd_work *wk, 695 struct cfg80211_bss *cbss,
895 u32 bss_info_changed) 696 u32 bss_info_changed)
896{ 697{
698 struct ieee80211_bss *bss = (void *)cbss->priv;
897 struct ieee80211_local *local = sdata->local; 699 struct ieee80211_local *local = sdata->local;
898 struct ieee80211_bss *bss = wk->bss;
899 700
900 bss_info_changed |= BSS_CHANGED_ASSOC; 701 bss_info_changed |= BSS_CHANGED_ASSOC;
901 /* set timing information */ 702 /* set timing information */
902 sdata->vif.bss_conf.beacon_int = bss->cbss.beacon_interval; 703 sdata->vif.bss_conf.beacon_int = cbss->beacon_interval;
903 sdata->vif.bss_conf.timestamp = bss->cbss.tsf; 704 sdata->vif.bss_conf.timestamp = cbss->tsf;
904 sdata->vif.bss_conf.dtim_period = bss->dtim_period; 705 sdata->vif.bss_conf.dtim_period = bss->dtim_period;
905 706
906 bss_info_changed |= BSS_CHANGED_BEACON_INT; 707 bss_info_changed |= BSS_CHANGED_BEACON_INT;
907 bss_info_changed |= ieee80211_handle_bss_capability(sdata, 708 bss_info_changed |= ieee80211_handle_bss_capability(sdata,
908 bss->cbss.capability, bss->has_erp_value, bss->erp_value); 709 cbss->capability, bss->has_erp_value, bss->erp_value);
909 710
910 sdata->u.mgd.associated = bss; 711 sdata->u.mgd.associated = cbss;
911 sdata->u.mgd.old_associate_work = wk; 712 memcpy(sdata->u.mgd.bssid, cbss->bssid, ETH_ALEN);
912 memcpy(sdata->u.mgd.bssid, bss->cbss.bssid, ETH_ALEN);
913 713
914 /* just to be sure */ 714 /* just to be sure */
915 sdata->u.mgd.flags &= ~(IEEE80211_STA_CONNECTION_POLL | 715 sdata->u.mgd.flags &= ~(IEEE80211_STA_CONNECTION_POLL |
@@ -940,99 +740,14 @@ static void ieee80211_set_associated(struct ieee80211_sub_if_data *sdata,
940 740
941 mutex_lock(&local->iflist_mtx); 741 mutex_lock(&local->iflist_mtx);
942 ieee80211_recalc_ps(local, -1); 742 ieee80211_recalc_ps(local, -1);
743 ieee80211_recalc_smps(local, sdata);
943 mutex_unlock(&local->iflist_mtx); 744 mutex_unlock(&local->iflist_mtx);
944 745
945 netif_tx_start_all_queues(sdata->dev); 746 netif_tx_start_all_queues(sdata->dev);
946 netif_carrier_on(sdata->dev); 747 netif_carrier_on(sdata->dev);
947} 748}
948 749
949static enum rx_mgmt_action __must_check 750static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata)
950ieee80211_direct_probe(struct ieee80211_sub_if_data *sdata,
951 struct ieee80211_mgd_work *wk)
952{
953 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
954 struct ieee80211_local *local = sdata->local;
955
956 wk->tries++;
957 if (wk->tries > IEEE80211_AUTH_MAX_TRIES) {
958 printk(KERN_DEBUG "%s: direct probe to AP %pM timed out\n",
959 sdata->dev->name, wk->bss->cbss.bssid);
960
961 /*
962 * Most likely AP is not in the range so remove the
963 * bss struct for that AP.
964 */
965 cfg80211_unlink_bss(local->hw.wiphy, &wk->bss->cbss);
966
967 /*
968 * We might have a pending scan which had no chance to run yet
969 * due to work needing to be done. Hence, queue the STAs work
970 * again for that.
971 */
972 ieee80211_queue_work(&local->hw, &ifmgd->work);
973 return RX_MGMT_CFG80211_AUTH_TO;
974 }
975
976 printk(KERN_DEBUG "%s: direct probe to AP %pM (try %d)\n",
977 sdata->dev->name, wk->bss->cbss.bssid,
978 wk->tries);
979
980 /*
981 * Direct probe is sent to broadcast address as some APs
982 * will not answer to direct packet in unassociated state.
983 */
984 ieee80211_send_probe_req(sdata, NULL, wk->ssid, wk->ssid_len, NULL, 0);
985
986 wk->timeout = jiffies + IEEE80211_AUTH_TIMEOUT;
987 run_again(ifmgd, wk->timeout);
988
989 return RX_MGMT_NONE;
990}
991
992
993static enum rx_mgmt_action __must_check
994ieee80211_authenticate(struct ieee80211_sub_if_data *sdata,
995 struct ieee80211_mgd_work *wk)
996{
997 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
998 struct ieee80211_local *local = sdata->local;
999
1000 wk->tries++;
1001 if (wk->tries > IEEE80211_AUTH_MAX_TRIES) {
1002 printk(KERN_DEBUG "%s: authentication with AP %pM"
1003 " timed out\n",
1004 sdata->dev->name, wk->bss->cbss.bssid);
1005
1006 /*
1007 * Most likely AP is not in the range so remove the
1008 * bss struct for that AP.
1009 */
1010 cfg80211_unlink_bss(local->hw.wiphy, &wk->bss->cbss);
1011
1012 /*
1013 * We might have a pending scan which had no chance to run yet
1014 * due to work needing to be done. Hence, queue the STAs work
1015 * again for that.
1016 */
1017 ieee80211_queue_work(&local->hw, &ifmgd->work);
1018 return RX_MGMT_CFG80211_AUTH_TO;
1019 }
1020
1021 printk(KERN_DEBUG "%s: authenticate with AP %pM (try %d)\n",
1022 sdata->dev->name, wk->bss->cbss.bssid, wk->tries);
1023
1024 ieee80211_send_auth(sdata, 1, wk->auth_alg, wk->ie, wk->ie_len,
1025 wk->bss->cbss.bssid, NULL, 0, 0);
1026 wk->auth_transaction = 2;
1027
1028 wk->timeout = jiffies + IEEE80211_AUTH_TIMEOUT;
1029 run_again(ifmgd, wk->timeout);
1030
1031 return RX_MGMT_NONE;
1032}
1033
1034static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata,
1035 bool deauth)
1036{ 751{
1037 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; 752 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
1038 struct ieee80211_local *local = sdata->local; 753 struct ieee80211_local *local = sdata->local;
@@ -1045,21 +760,11 @@ static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata,
1045 if (WARN_ON(!ifmgd->associated)) 760 if (WARN_ON(!ifmgd->associated))
1046 return; 761 return;
1047 762
1048 memcpy(bssid, ifmgd->associated->cbss.bssid, ETH_ALEN); 763 memcpy(bssid, ifmgd->associated->bssid, ETH_ALEN);
1049 764
1050 ifmgd->associated = NULL; 765 ifmgd->associated = NULL;
1051 memset(ifmgd->bssid, 0, ETH_ALEN); 766 memset(ifmgd->bssid, 0, ETH_ALEN);
1052 767
1053 if (deauth) {
1054 kfree(ifmgd->old_associate_work);
1055 ifmgd->old_associate_work = NULL;
1056 } else {
1057 struct ieee80211_mgd_work *wk = ifmgd->old_associate_work;
1058
1059 wk->state = IEEE80211_MGD_STATE_IDLE;
1060 list_add(&wk->list, &ifmgd->work_list);
1061 }
1062
1063 /* 768 /*
1064 * we need to commit the associated = NULL change because the 769 * we need to commit the associated = NULL change because the
1065 * scan code uses that to determine whether this iface should 770 * scan code uses that to determine whether this iface should
@@ -1078,7 +783,7 @@ static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata,
1078 netif_carrier_off(sdata->dev); 783 netif_carrier_off(sdata->dev);
1079 784
1080 rcu_read_lock(); 785 rcu_read_lock();
1081 sta = sta_info_get(local, bssid); 786 sta = sta_info_get(sdata, bssid);
1082 if (sta) 787 if (sta)
1083 ieee80211_sta_tear_down_BA_sessions(sta); 788 ieee80211_sta_tear_down_BA_sessions(sta);
1084 rcu_read_unlock(); 789 rcu_read_unlock();
@@ -1115,7 +820,7 @@ static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata,
1115 820
1116 rcu_read_lock(); 821 rcu_read_lock();
1117 822
1118 sta = sta_info_get(local, bssid); 823 sta = sta_info_get(sdata, bssid);
1119 if (!sta) { 824 if (!sta) {
1120 rcu_read_unlock(); 825 rcu_read_unlock();
1121 return; 826 return;
@@ -1128,44 +833,6 @@ static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata,
1128 sta_info_destroy(sta); 833 sta_info_destroy(sta);
1129} 834}
1130 835
1131static enum rx_mgmt_action __must_check
1132ieee80211_associate(struct ieee80211_sub_if_data *sdata,
1133 struct ieee80211_mgd_work *wk)
1134{
1135 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
1136 struct ieee80211_local *local = sdata->local;
1137
1138 wk->tries++;
1139 if (wk->tries > IEEE80211_ASSOC_MAX_TRIES) {
1140 printk(KERN_DEBUG "%s: association with AP %pM"
1141 " timed out\n",
1142 sdata->dev->name, wk->bss->cbss.bssid);
1143
1144 /*
1145 * Most likely AP is not in the range so remove the
1146 * bss struct for that AP.
1147 */
1148 cfg80211_unlink_bss(local->hw.wiphy, &wk->bss->cbss);
1149
1150 /*
1151 * We might have a pending scan which had no chance to run yet
1152 * due to work needing to be done. Hence, queue the STAs work
1153 * again for that.
1154 */
1155 ieee80211_queue_work(&local->hw, &ifmgd->work);
1156 return RX_MGMT_CFG80211_ASSOC_TO;
1157 }
1158
1159 printk(KERN_DEBUG "%s: associate with AP %pM (try %d)\n",
1160 sdata->dev->name, wk->bss->cbss.bssid, wk->tries);
1161 ieee80211_send_assoc(sdata, wk);
1162
1163 wk->timeout = jiffies + IEEE80211_ASSOC_TIMEOUT;
1164 run_again(ifmgd, wk->timeout);
1165
1166 return RX_MGMT_NONE;
1167}
1168
1169void ieee80211_sta_rx_notify(struct ieee80211_sub_if_data *sdata, 836void ieee80211_sta_rx_notify(struct ieee80211_sub_if_data *sdata,
1170 struct ieee80211_hdr *hdr) 837 struct ieee80211_hdr *hdr)
1171{ 838{
@@ -1189,8 +856,8 @@ static void ieee80211_mgd_probe_ap_send(struct ieee80211_sub_if_data *sdata)
1189 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; 856 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
1190 const u8 *ssid; 857 const u8 *ssid;
1191 858
1192 ssid = ieee80211_bss_get_ie(&ifmgd->associated->cbss, WLAN_EID_SSID); 859 ssid = ieee80211_bss_get_ie(ifmgd->associated, WLAN_EID_SSID);
1193 ieee80211_send_probe_req(sdata, ifmgd->associated->cbss.bssid, 860 ieee80211_send_probe_req(sdata, ifmgd->associated->bssid,
1194 ssid + 2, ssid[1], NULL, 0); 861 ssid + 2, ssid[1], NULL, 0);
1195 862
1196 ifmgd->probe_send_count++; 863 ifmgd->probe_send_count++;
@@ -1204,12 +871,15 @@ static void ieee80211_mgd_probe_ap(struct ieee80211_sub_if_data *sdata,
1204 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; 871 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
1205 bool already = false; 872 bool already = false;
1206 873
1207 if (!netif_running(sdata->dev)) 874 if (!ieee80211_sdata_running(sdata))
1208 return; 875 return;
1209 876
1210 if (sdata->local->scanning) 877 if (sdata->local->scanning)
1211 return; 878 return;
1212 879
880 if (sdata->local->tmp_channel)
881 return;
882
1213 mutex_lock(&ifmgd->mtx); 883 mutex_lock(&ifmgd->mtx);
1214 884
1215 if (!ifmgd->associated) 885 if (!ifmgd->associated)
@@ -1218,7 +888,7 @@ static void ieee80211_mgd_probe_ap(struct ieee80211_sub_if_data *sdata,
1218#ifdef CONFIG_MAC80211_VERBOSE_DEBUG 888#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
1219 if (beacon && net_ratelimit()) 889 if (beacon && net_ratelimit())
1220 printk(KERN_DEBUG "%s: detected beacon loss from AP " 890 printk(KERN_DEBUG "%s: detected beacon loss from AP "
1221 "- sending probe request\n", sdata->dev->name); 891 "- sending probe request\n", sdata->name);
1222#endif 892#endif
1223 893
1224 /* 894 /*
@@ -1271,88 +941,8 @@ void ieee80211_beacon_loss(struct ieee80211_vif *vif)
1271} 941}
1272EXPORT_SYMBOL(ieee80211_beacon_loss); 942EXPORT_SYMBOL(ieee80211_beacon_loss);
1273 943
1274static void ieee80211_auth_completed(struct ieee80211_sub_if_data *sdata,
1275 struct ieee80211_mgd_work *wk)
1276{
1277 wk->state = IEEE80211_MGD_STATE_IDLE;
1278 printk(KERN_DEBUG "%s: authenticated\n", sdata->dev->name);
1279}
1280
1281
1282static void ieee80211_auth_challenge(struct ieee80211_sub_if_data *sdata,
1283 struct ieee80211_mgd_work *wk,
1284 struct ieee80211_mgmt *mgmt,
1285 size_t len)
1286{
1287 u8 *pos;
1288 struct ieee802_11_elems elems;
1289
1290 pos = mgmt->u.auth.variable;
1291 ieee802_11_parse_elems(pos, len - (pos - (u8 *) mgmt), &elems);
1292 if (!elems.challenge)
1293 return;
1294 ieee80211_send_auth(sdata, 3, wk->auth_alg,
1295 elems.challenge - 2, elems.challenge_len + 2,
1296 wk->bss->cbss.bssid,
1297 wk->key, wk->key_len, wk->key_idx);
1298 wk->auth_transaction = 4;
1299}
1300
1301static enum rx_mgmt_action __must_check
1302ieee80211_rx_mgmt_auth(struct ieee80211_sub_if_data *sdata,
1303 struct ieee80211_mgd_work *wk,
1304 struct ieee80211_mgmt *mgmt, size_t len)
1305{
1306 u16 auth_alg, auth_transaction, status_code;
1307
1308 if (wk->state != IEEE80211_MGD_STATE_AUTH)
1309 return RX_MGMT_NONE;
1310
1311 if (len < 24 + 6)
1312 return RX_MGMT_NONE;
1313
1314 if (memcmp(wk->bss->cbss.bssid, mgmt->sa, ETH_ALEN) != 0)
1315 return RX_MGMT_NONE;
1316
1317 if (memcmp(wk->bss->cbss.bssid, mgmt->bssid, ETH_ALEN) != 0)
1318 return RX_MGMT_NONE;
1319
1320 auth_alg = le16_to_cpu(mgmt->u.auth.auth_alg);
1321 auth_transaction = le16_to_cpu(mgmt->u.auth.auth_transaction);
1322 status_code = le16_to_cpu(mgmt->u.auth.status_code);
1323
1324 if (auth_alg != wk->auth_alg ||
1325 auth_transaction != wk->auth_transaction)
1326 return RX_MGMT_NONE;
1327
1328 if (status_code != WLAN_STATUS_SUCCESS) {
1329 list_del(&wk->list);
1330 kfree(wk);
1331 return RX_MGMT_CFG80211_AUTH;
1332 }
1333
1334 switch (wk->auth_alg) {
1335 case WLAN_AUTH_OPEN:
1336 case WLAN_AUTH_LEAP:
1337 case WLAN_AUTH_FT:
1338 ieee80211_auth_completed(sdata, wk);
1339 return RX_MGMT_CFG80211_AUTH;
1340 case WLAN_AUTH_SHARED_KEY:
1341 if (wk->auth_transaction == 4) {
1342 ieee80211_auth_completed(sdata, wk);
1343 return RX_MGMT_CFG80211_AUTH;
1344 } else
1345 ieee80211_auth_challenge(sdata, wk, mgmt, len);
1346 break;
1347 }
1348
1349 return RX_MGMT_NONE;
1350}
1351
1352
1353static enum rx_mgmt_action __must_check 944static enum rx_mgmt_action __must_check
1354ieee80211_rx_mgmt_deauth(struct ieee80211_sub_if_data *sdata, 945ieee80211_rx_mgmt_deauth(struct ieee80211_sub_if_data *sdata,
1355 struct ieee80211_mgd_work *wk,
1356 struct ieee80211_mgmt *mgmt, size_t len) 946 struct ieee80211_mgmt *mgmt, size_t len)
1357{ 947{
1358 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; 948 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
@@ -1364,23 +954,15 @@ ieee80211_rx_mgmt_deauth(struct ieee80211_sub_if_data *sdata,
1364 954
1365 ASSERT_MGD_MTX(ifmgd); 955 ASSERT_MGD_MTX(ifmgd);
1366 956
1367 if (wk) 957 bssid = ifmgd->associated->bssid;
1368 bssid = wk->bss->cbss.bssid;
1369 else
1370 bssid = ifmgd->associated->cbss.bssid;
1371 958
1372 reason_code = le16_to_cpu(mgmt->u.deauth.reason_code); 959 reason_code = le16_to_cpu(mgmt->u.deauth.reason_code);
1373 960
1374 printk(KERN_DEBUG "%s: deauthenticated from %pM (Reason: %u)\n", 961 printk(KERN_DEBUG "%s: deauthenticated from %pM (Reason: %u)\n",
1375 sdata->dev->name, bssid, reason_code); 962 sdata->name, bssid, reason_code);
1376 963
1377 if (!wk) { 964 ieee80211_set_disassoc(sdata);
1378 ieee80211_set_disassoc(sdata, true); 965 ieee80211_recalc_idle(sdata->local);
1379 ieee80211_recalc_idle(sdata->local);
1380 } else {
1381 list_del(&wk->list);
1382 kfree(wk);
1383 }
1384 966
1385 return RX_MGMT_CFG80211_DEAUTH; 967 return RX_MGMT_CFG80211_DEAUTH;
1386} 968}
@@ -1401,123 +983,72 @@ ieee80211_rx_mgmt_disassoc(struct ieee80211_sub_if_data *sdata,
1401 if (WARN_ON(!ifmgd->associated)) 983 if (WARN_ON(!ifmgd->associated))
1402 return RX_MGMT_NONE; 984 return RX_MGMT_NONE;
1403 985
1404 if (WARN_ON(memcmp(ifmgd->associated->cbss.bssid, mgmt->sa, ETH_ALEN))) 986 if (WARN_ON(memcmp(ifmgd->associated->bssid, mgmt->sa, ETH_ALEN)))
1405 return RX_MGMT_NONE; 987 return RX_MGMT_NONE;
1406 988
1407 reason_code = le16_to_cpu(mgmt->u.disassoc.reason_code); 989 reason_code = le16_to_cpu(mgmt->u.disassoc.reason_code);
1408 990
1409 printk(KERN_DEBUG "%s: disassociated from %pM (Reason: %u)\n", 991 printk(KERN_DEBUG "%s: disassociated from %pM (Reason: %u)\n",
1410 sdata->dev->name, mgmt->sa, reason_code); 992 sdata->name, mgmt->sa, reason_code);
1411 993
1412 ieee80211_set_disassoc(sdata, false); 994 ieee80211_set_disassoc(sdata);
1413 ieee80211_recalc_idle(sdata->local); 995 ieee80211_recalc_idle(sdata->local);
1414 return RX_MGMT_CFG80211_DISASSOC; 996 return RX_MGMT_CFG80211_DISASSOC;
1415} 997}
1416 998
1417 999
1418static enum rx_mgmt_action __must_check 1000static bool ieee80211_assoc_success(struct ieee80211_work *wk,
1419ieee80211_rx_mgmt_assoc_resp(struct ieee80211_sub_if_data *sdata, 1001 struct ieee80211_mgmt *mgmt, size_t len)
1420 struct ieee80211_mgd_work *wk,
1421 struct ieee80211_mgmt *mgmt, size_t len,
1422 bool reassoc)
1423{ 1002{
1003 struct ieee80211_sub_if_data *sdata = wk->sdata;
1424 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; 1004 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
1425 struct ieee80211_local *local = sdata->local; 1005 struct ieee80211_local *local = sdata->local;
1426 struct ieee80211_supported_band *sband; 1006 struct ieee80211_supported_band *sband;
1427 struct sta_info *sta; 1007 struct sta_info *sta;
1008 struct cfg80211_bss *cbss = wk->assoc.bss;
1009 u8 *pos;
1428 u32 rates, basic_rates; 1010 u32 rates, basic_rates;
1429 u16 capab_info, status_code, aid; 1011 u16 capab_info, aid;
1430 struct ieee802_11_elems elems; 1012 struct ieee802_11_elems elems;
1431 struct ieee80211_bss_conf *bss_conf = &sdata->vif.bss_conf; 1013 struct ieee80211_bss_conf *bss_conf = &sdata->vif.bss_conf;
1432 u8 *pos;
1433 u32 changed = 0; 1014 u32 changed = 0;
1434 int i, j; 1015 int i, j, err;
1435 bool have_higher_than_11mbit = false, newsta = false; 1016 bool have_higher_than_11mbit = false;
1436 u16 ap_ht_cap_flags; 1017 u16 ap_ht_cap_flags;
1437 1018
1438 /* 1019 /* AssocResp and ReassocResp have identical structure */
1439 * AssocResp and ReassocResp have identical structure, so process both
1440 * of them in this function.
1441 */
1442
1443 if (len < 24 + 6)
1444 return RX_MGMT_NONE;
1445
1446 if (memcmp(wk->bss->cbss.bssid, mgmt->sa, ETH_ALEN) != 0)
1447 return RX_MGMT_NONE;
1448 1020
1449 capab_info = le16_to_cpu(mgmt->u.assoc_resp.capab_info);
1450 status_code = le16_to_cpu(mgmt->u.assoc_resp.status_code);
1451 aid = le16_to_cpu(mgmt->u.assoc_resp.aid); 1021 aid = le16_to_cpu(mgmt->u.assoc_resp.aid);
1452 1022 capab_info = le16_to_cpu(mgmt->u.assoc_resp.capab_info);
1453 printk(KERN_DEBUG "%s: RX %sssocResp from %pM (capab=0x%x "
1454 "status=%d aid=%d)\n",
1455 sdata->dev->name, reassoc ? "Rea" : "A", mgmt->sa,
1456 capab_info, status_code, (u16)(aid & ~(BIT(15) | BIT(14))));
1457
1458 pos = mgmt->u.assoc_resp.variable;
1459 ieee802_11_parse_elems(pos, len - (pos - (u8 *) mgmt), &elems);
1460
1461 if (status_code == WLAN_STATUS_ASSOC_REJECTED_TEMPORARILY &&
1462 elems.timeout_int && elems.timeout_int_len == 5 &&
1463 elems.timeout_int[0] == WLAN_TIMEOUT_ASSOC_COMEBACK) {
1464 u32 tu, ms;
1465 tu = get_unaligned_le32(elems.timeout_int + 1);
1466 ms = tu * 1024 / 1000;
1467 printk(KERN_DEBUG "%s: AP rejected association temporarily; "
1468 "comeback duration %u TU (%u ms)\n",
1469 sdata->dev->name, tu, ms);
1470 wk->timeout = jiffies + msecs_to_jiffies(ms);
1471 if (ms > IEEE80211_ASSOC_TIMEOUT)
1472 run_again(ifmgd, jiffies + msecs_to_jiffies(ms));
1473 return RX_MGMT_NONE;
1474 }
1475
1476 if (status_code != WLAN_STATUS_SUCCESS) {
1477 printk(KERN_DEBUG "%s: AP denied association (code=%d)\n",
1478 sdata->dev->name, status_code);
1479 wk->state = IEEE80211_MGD_STATE_IDLE;
1480 return RX_MGMT_CFG80211_ASSOC;
1481 }
1482 1023
1483 if ((aid & (BIT(15) | BIT(14))) != (BIT(15) | BIT(14))) 1024 if ((aid & (BIT(15) | BIT(14))) != (BIT(15) | BIT(14)))
1484 printk(KERN_DEBUG "%s: invalid aid value %d; bits 15:14 not " 1025 printk(KERN_DEBUG "%s: invalid aid value %d; bits 15:14 not "
1485 "set\n", sdata->dev->name, aid); 1026 "set\n", sdata->name, aid);
1486 aid &= ~(BIT(15) | BIT(14)); 1027 aid &= ~(BIT(15) | BIT(14));
1487 1028
1029 pos = mgmt->u.assoc_resp.variable;
1030 ieee802_11_parse_elems(pos, len - (pos - (u8 *) mgmt), &elems);
1031
1488 if (!elems.supp_rates) { 1032 if (!elems.supp_rates) {
1489 printk(KERN_DEBUG "%s: no SuppRates element in AssocResp\n", 1033 printk(KERN_DEBUG "%s: no SuppRates element in AssocResp\n",
1490 sdata->dev->name); 1034 sdata->name);
1491 return RX_MGMT_NONE; 1035 return false;
1492 } 1036 }
1493 1037
1494 printk(KERN_DEBUG "%s: associated\n", sdata->dev->name);
1495 ifmgd->aid = aid; 1038 ifmgd->aid = aid;
1496 1039
1497 rcu_read_lock(); 1040 sta = sta_info_alloc(sdata, cbss->bssid, GFP_KERNEL);
1498
1499 /* Add STA entry for the AP */
1500 sta = sta_info_get(local, wk->bss->cbss.bssid);
1501 if (!sta) { 1041 if (!sta) {
1502 newsta = true; 1042 printk(KERN_DEBUG "%s: failed to alloc STA entry for"
1503 1043 " the AP\n", sdata->name);
1504 rcu_read_unlock(); 1044 return false;
1505
1506 sta = sta_info_alloc(sdata, wk->bss->cbss.bssid, GFP_KERNEL);
1507 if (!sta) {
1508 printk(KERN_DEBUG "%s: failed to alloc STA entry for"
1509 " the AP\n", sdata->dev->name);
1510 return RX_MGMT_NONE;
1511 }
1512
1513 set_sta_flags(sta, WLAN_STA_AUTH | WLAN_STA_ASSOC |
1514 WLAN_STA_ASSOC_AP);
1515 if (!(ifmgd->flags & IEEE80211_STA_CONTROL_PORT))
1516 set_sta_flags(sta, WLAN_STA_AUTHORIZED);
1517
1518 rcu_read_lock();
1519 } 1045 }
1520 1046
1047 set_sta_flags(sta, WLAN_STA_AUTH | WLAN_STA_ASSOC |
1048 WLAN_STA_ASSOC_AP);
1049 if (!(ifmgd->flags & IEEE80211_STA_CONTROL_PORT))
1050 set_sta_flags(sta, WLAN_STA_AUTHORIZED);
1051
1521 rates = 0; 1052 rates = 0;
1522 basic_rates = 0; 1053 basic_rates = 0;
1523 sband = local->hw.wiphy->bands[local->hw.conf.channel->band]; 1054 sband = local->hw.wiphy->bands[local->hw.conf.channel->band];
@@ -1580,40 +1111,40 @@ ieee80211_rx_mgmt_assoc_resp(struct ieee80211_sub_if_data *sdata,
1580 if (elems.wmm_param) 1111 if (elems.wmm_param)
1581 set_sta_flags(sta, WLAN_STA_WME); 1112 set_sta_flags(sta, WLAN_STA_WME);
1582 1113
1583 if (newsta) { 1114 err = sta_info_insert(sta);
1584 int err = sta_info_insert(sta); 1115 sta = NULL;
1585 if (err) { 1116 if (err) {
1586 printk(KERN_DEBUG "%s: failed to insert STA entry for" 1117 printk(KERN_DEBUG "%s: failed to insert STA entry for"
1587 " the AP (error %d)\n", sdata->dev->name, err); 1118 " the AP (error %d)\n", sdata->name, err);
1588 rcu_read_unlock(); 1119 return false;
1589 return RX_MGMT_NONE;
1590 }
1591 } 1120 }
1592 1121
1593 rcu_read_unlock();
1594
1595 if (elems.wmm_param) 1122 if (elems.wmm_param)
1596 ieee80211_sta_wmm_params(local, ifmgd, elems.wmm_param, 1123 ieee80211_sta_wmm_params(local, ifmgd, elems.wmm_param,
1597 elems.wmm_param_len); 1124 elems.wmm_param_len);
1598 else 1125 else
1599 ieee80211_set_wmm_default(sdata); 1126 ieee80211_set_wmm_default(sdata);
1600 1127
1128 local->oper_channel = wk->chan;
1129
1601 if (elems.ht_info_elem && elems.wmm_param && 1130 if (elems.ht_info_elem && elems.wmm_param &&
1602 (ifmgd->flags & IEEE80211_STA_WMM_ENABLED) && 1131 (sdata->local->hw.queues >= 4) &&
1603 !(ifmgd->flags & IEEE80211_STA_DISABLE_11N)) 1132 !(ifmgd->flags & IEEE80211_STA_DISABLE_11N))
1604 changed |= ieee80211_enable_ht(sdata, elems.ht_info_elem, 1133 changed |= ieee80211_enable_ht(sdata, elems.ht_info_elem,
1605 wk->bss->cbss.bssid, 1134 cbss->bssid, ap_ht_cap_flags);
1606 ap_ht_cap_flags);
1607
1608 /* delete work item -- must be before set_associated for PS */
1609 list_del(&wk->list);
1610 1135
1611 /* set AID and assoc capability, 1136 /* set AID and assoc capability,
1612 * ieee80211_set_associated() will tell the driver */ 1137 * ieee80211_set_associated() will tell the driver */
1613 bss_conf->aid = aid; 1138 bss_conf->aid = aid;
1614 bss_conf->assoc_capability = capab_info; 1139 bss_conf->assoc_capability = capab_info;
1615 /* this will take ownership of wk */ 1140 ieee80211_set_associated(sdata, cbss, changed);
1616 ieee80211_set_associated(sdata, wk, changed); 1141
1142 /*
1143 * If we're using 4-addr mode, let the AP know that we're
1144 * doing so, so that it can create the STA VLAN on its side
1145 */
1146 if (ifmgd->use_4addr)
1147 ieee80211_send_4addr_nullfunc(local, sdata);
1617 1148
1618 /* 1149 /*
1619 * Start timer to probe the connection to the AP now. 1150 * Start timer to probe the connection to the AP now.
@@ -1622,7 +1153,7 @@ ieee80211_rx_mgmt_assoc_resp(struct ieee80211_sub_if_data *sdata,
1622 ieee80211_sta_rx_notify(sdata, (struct ieee80211_hdr *)mgmt); 1153 ieee80211_sta_rx_notify(sdata, (struct ieee80211_hdr *)mgmt);
1623 mod_beacon_timer(sdata); 1154 mod_beacon_timer(sdata);
1624 1155
1625 return RX_MGMT_CFG80211_ASSOC; 1156 return true;
1626} 1157}
1627 1158
1628 1159
@@ -1657,7 +1188,7 @@ static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata,
1657 return; 1188 return;
1658 1189
1659 if (elems->ch_switch_elem && (elems->ch_switch_elem_len == 3) && 1190 if (elems->ch_switch_elem && (elems->ch_switch_elem_len == 3) &&
1660 (memcmp(mgmt->bssid, sdata->u.mgd.associated->cbss.bssid, 1191 (memcmp(mgmt->bssid, sdata->u.mgd.associated->bssid,
1661 ETH_ALEN) == 0)) { 1192 ETH_ALEN) == 0)) {
1662 struct ieee80211_channel_sw_ie *sw_elem = 1193 struct ieee80211_channel_sw_ie *sw_elem =
1663 (struct ieee80211_channel_sw_ie *)elems->ch_switch_elem; 1194 (struct ieee80211_channel_sw_ie *)elems->ch_switch_elem;
@@ -1667,19 +1198,19 @@ static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata,
1667 1198
1668 1199
1669static void ieee80211_rx_mgmt_probe_resp(struct ieee80211_sub_if_data *sdata, 1200static void ieee80211_rx_mgmt_probe_resp(struct ieee80211_sub_if_data *sdata,
1670 struct ieee80211_mgd_work *wk, 1201 struct sk_buff *skb)
1671 struct ieee80211_mgmt *mgmt, size_t len,
1672 struct ieee80211_rx_status *rx_status)
1673{ 1202{
1203 struct ieee80211_mgmt *mgmt = (void *)skb->data;
1674 struct ieee80211_if_managed *ifmgd; 1204 struct ieee80211_if_managed *ifmgd;
1675 size_t baselen; 1205 struct ieee80211_rx_status *rx_status = (void *) skb->cb;
1206 size_t baselen, len = skb->len;
1676 struct ieee802_11_elems elems; 1207 struct ieee802_11_elems elems;
1677 1208
1678 ifmgd = &sdata->u.mgd; 1209 ifmgd = &sdata->u.mgd;
1679 1210
1680 ASSERT_MGD_MTX(ifmgd); 1211 ASSERT_MGD_MTX(ifmgd);
1681 1212
1682 if (memcmp(mgmt->da, sdata->dev->dev_addr, ETH_ALEN)) 1213 if (memcmp(mgmt->da, sdata->vif.addr, ETH_ALEN))
1683 return; /* ignore ProbeResp to foreign address */ 1214 return; /* ignore ProbeResp to foreign address */
1684 1215
1685 baselen = (u8 *) mgmt->u.probe_resp.variable - (u8 *) mgmt; 1216 baselen = (u8 *) mgmt->u.probe_resp.variable - (u8 *) mgmt;
@@ -1691,17 +1222,8 @@ static void ieee80211_rx_mgmt_probe_resp(struct ieee80211_sub_if_data *sdata,
1691 1222
1692 ieee80211_rx_bss_info(sdata, mgmt, len, rx_status, &elems, false); 1223 ieee80211_rx_bss_info(sdata, mgmt, len, rx_status, &elems, false);
1693 1224
1694 /* direct probe may be part of the association flow */
1695 if (wk && wk->state == IEEE80211_MGD_STATE_PROBE) {
1696 printk(KERN_DEBUG "%s: direct probe responded\n",
1697 sdata->dev->name);
1698 wk->tries = 0;
1699 wk->state = IEEE80211_MGD_STATE_AUTH;
1700 WARN_ON(ieee80211_authenticate(sdata, wk) != RX_MGMT_NONE);
1701 }
1702
1703 if (ifmgd->associated && 1225 if (ifmgd->associated &&
1704 memcmp(mgmt->bssid, ifmgd->associated->cbss.bssid, ETH_ALEN) == 0 && 1226 memcmp(mgmt->bssid, ifmgd->associated->bssid, ETH_ALEN) == 0 &&
1705 ifmgd->flags & (IEEE80211_STA_BEACON_POLL | 1227 ifmgd->flags & (IEEE80211_STA_BEACON_POLL |
1706 IEEE80211_STA_CONNECTION_POLL)) { 1228 IEEE80211_STA_CONNECTION_POLL)) {
1707 ifmgd->flags &= ~(IEEE80211_STA_CONNECTION_POLL | 1229 ifmgd->flags &= ~(IEEE80211_STA_CONNECTION_POLL |
@@ -1774,7 +1296,7 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata,
1774 if (!ifmgd->associated) 1296 if (!ifmgd->associated)
1775 return; 1297 return;
1776 1298
1777 bssid = ifmgd->associated->cbss.bssid; 1299 bssid = ifmgd->associated->bssid;
1778 1300
1779 /* 1301 /*
1780 * And in theory even frames from a different AP we were just 1302 * And in theory even frames from a different AP we were just
@@ -1787,7 +1309,7 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata,
1787#ifdef CONFIG_MAC80211_VERBOSE_DEBUG 1309#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
1788 if (net_ratelimit()) { 1310 if (net_ratelimit()) {
1789 printk(KERN_DEBUG "%s: cancelling probereq poll due " 1311 printk(KERN_DEBUG "%s: cancelling probereq poll due "
1790 "to a received beacon\n", sdata->dev->name); 1312 "to a received beacon\n", sdata->name);
1791 } 1313 }
1792#endif 1314#endif
1793 ifmgd->flags &= ~IEEE80211_STA_BEACON_POLL; 1315 ifmgd->flags &= ~IEEE80211_STA_BEACON_POLL;
@@ -1865,7 +1387,7 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata,
1865 1387
1866 rcu_read_lock(); 1388 rcu_read_lock();
1867 1389
1868 sta = sta_info_get(local, bssid); 1390 sta = sta_info_get(sdata, bssid);
1869 if (WARN_ON(!sta)) { 1391 if (WARN_ON(!sta)) {
1870 rcu_read_unlock(); 1392 rcu_read_unlock();
1871 return; 1393 return;
@@ -1913,9 +1435,6 @@ ieee80211_rx_result ieee80211_sta_rx_mgmt(struct ieee80211_sub_if_data *sdata,
1913 switch (fc & IEEE80211_FCTL_STYPE) { 1435 switch (fc & IEEE80211_FCTL_STYPE) {
1914 case IEEE80211_STYPE_PROBE_RESP: 1436 case IEEE80211_STYPE_PROBE_RESP:
1915 case IEEE80211_STYPE_BEACON: 1437 case IEEE80211_STYPE_BEACON:
1916 case IEEE80211_STYPE_AUTH:
1917 case IEEE80211_STYPE_ASSOC_RESP:
1918 case IEEE80211_STYPE_REASSOC_RESP:
1919 case IEEE80211_STYPE_DEAUTH: 1438 case IEEE80211_STYPE_DEAUTH:
1920 case IEEE80211_STYPE_DISASSOC: 1439 case IEEE80211_STYPE_DISASSOC:
1921 case IEEE80211_STYPE_ACTION: 1440 case IEEE80211_STYPE_ACTION:
@@ -1933,7 +1452,6 @@ static void ieee80211_sta_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata,
1933 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; 1452 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
1934 struct ieee80211_rx_status *rx_status; 1453 struct ieee80211_rx_status *rx_status;
1935 struct ieee80211_mgmt *mgmt; 1454 struct ieee80211_mgmt *mgmt;
1936 struct ieee80211_mgd_work *wk;
1937 enum rx_mgmt_action rma = RX_MGMT_NONE; 1455 enum rx_mgmt_action rma = RX_MGMT_NONE;
1938 u16 fc; 1456 u16 fc;
1939 1457
@@ -1944,20 +1462,17 @@ static void ieee80211_sta_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata,
1944 mutex_lock(&ifmgd->mtx); 1462 mutex_lock(&ifmgd->mtx);
1945 1463
1946 if (ifmgd->associated && 1464 if (ifmgd->associated &&
1947 memcmp(ifmgd->associated->cbss.bssid, mgmt->bssid, 1465 memcmp(ifmgd->associated->bssid, mgmt->bssid, ETH_ALEN) == 0) {
1948 ETH_ALEN) == 0) {
1949 switch (fc & IEEE80211_FCTL_STYPE) { 1466 switch (fc & IEEE80211_FCTL_STYPE) {
1950 case IEEE80211_STYPE_BEACON: 1467 case IEEE80211_STYPE_BEACON:
1951 ieee80211_rx_mgmt_beacon(sdata, mgmt, skb->len, 1468 ieee80211_rx_mgmt_beacon(sdata, mgmt, skb->len,
1952 rx_status); 1469 rx_status);
1953 break; 1470 break;
1954 case IEEE80211_STYPE_PROBE_RESP: 1471 case IEEE80211_STYPE_PROBE_RESP:
1955 ieee80211_rx_mgmt_probe_resp(sdata, NULL, mgmt, 1472 ieee80211_rx_mgmt_probe_resp(sdata, skb);
1956 skb->len, rx_status);
1957 break; 1473 break;
1958 case IEEE80211_STYPE_DEAUTH: 1474 case IEEE80211_STYPE_DEAUTH:
1959 rma = ieee80211_rx_mgmt_deauth(sdata, NULL, 1475 rma = ieee80211_rx_mgmt_deauth(sdata, mgmt, skb->len);
1960 mgmt, skb->len);
1961 break; 1476 break;
1962 case IEEE80211_STYPE_DISASSOC: 1477 case IEEE80211_STYPE_DISASSOC:
1963 rma = ieee80211_rx_mgmt_disassoc(sdata, mgmt, skb->len); 1478 rma = ieee80211_rx_mgmt_disassoc(sdata, mgmt, skb->len);
@@ -1968,7 +1483,7 @@ static void ieee80211_sta_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata,
1968 1483
1969 ieee80211_sta_process_chanswitch(sdata, 1484 ieee80211_sta_process_chanswitch(sdata,
1970 &mgmt->u.action.u.chan_switch.sw_elem, 1485 &mgmt->u.action.u.chan_switch.sw_elem,
1971 ifmgd->associated); 1486 (void *)ifmgd->associated->priv);
1972 break; 1487 break;
1973 } 1488 }
1974 mutex_unlock(&ifmgd->mtx); 1489 mutex_unlock(&ifmgd->mtx);
@@ -1989,58 +1504,11 @@ static void ieee80211_sta_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata,
1989 goto out; 1504 goto out;
1990 } 1505 }
1991 1506
1992 list_for_each_entry(wk, &ifmgd->work_list, list) {
1993 if (memcmp(wk->bss->cbss.bssid, mgmt->bssid, ETH_ALEN) != 0)
1994 continue;
1995
1996 switch (fc & IEEE80211_FCTL_STYPE) {
1997 case IEEE80211_STYPE_PROBE_RESP:
1998 ieee80211_rx_mgmt_probe_resp(sdata, wk, mgmt, skb->len,
1999 rx_status);
2000 break;
2001 case IEEE80211_STYPE_AUTH:
2002 rma = ieee80211_rx_mgmt_auth(sdata, wk, mgmt, skb->len);
2003 break;
2004 case IEEE80211_STYPE_ASSOC_RESP:
2005 rma = ieee80211_rx_mgmt_assoc_resp(sdata, wk, mgmt,
2006 skb->len, false);
2007 break;
2008 case IEEE80211_STYPE_REASSOC_RESP:
2009 rma = ieee80211_rx_mgmt_assoc_resp(sdata, wk, mgmt,
2010 skb->len, true);
2011 break;
2012 case IEEE80211_STYPE_DEAUTH:
2013 rma = ieee80211_rx_mgmt_deauth(sdata, wk, mgmt,
2014 skb->len);
2015 break;
2016 }
2017 /*
2018 * We've processed this frame for that work, so it can't
2019 * belong to another work struct.
2020 * NB: this is also required for correctness because the
2021 * called functions can free 'wk', and for 'rma'!
2022 */
2023 break;
2024 }
2025
2026 mutex_unlock(&ifmgd->mtx); 1507 mutex_unlock(&ifmgd->mtx);
2027 1508
2028 switch (rma) { 1509 if (skb->len >= 24 + 2 /* mgmt + deauth reason */ &&
2029 case RX_MGMT_NONE: 1510 (fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_DEAUTH)
2030 /* no action */
2031 break;
2032 case RX_MGMT_CFG80211_AUTH:
2033 cfg80211_send_rx_auth(sdata->dev, (u8 *) mgmt, skb->len);
2034 break;
2035 case RX_MGMT_CFG80211_ASSOC:
2036 cfg80211_send_rx_assoc(sdata->dev, (u8 *) mgmt, skb->len);
2037 break;
2038 case RX_MGMT_CFG80211_DEAUTH:
2039 cfg80211_send_deauth(sdata->dev, (u8 *)mgmt, skb->len); 1511 cfg80211_send_deauth(sdata->dev, (u8 *)mgmt, skb->len);
2040 break;
2041 default:
2042 WARN(1, "unexpected: %d", rma);
2043 }
2044 1512
2045 out: 1513 out:
2046 kfree_skb(skb); 1514 kfree_skb(skb);
@@ -2068,12 +1536,8 @@ static void ieee80211_sta_work(struct work_struct *work)
2068 struct ieee80211_local *local = sdata->local; 1536 struct ieee80211_local *local = sdata->local;
2069 struct ieee80211_if_managed *ifmgd; 1537 struct ieee80211_if_managed *ifmgd;
2070 struct sk_buff *skb; 1538 struct sk_buff *skb;
2071 struct ieee80211_mgd_work *wk, *tmp;
2072 LIST_HEAD(free_work);
2073 enum rx_mgmt_action rma;
2074 bool anybusy = false;
2075 1539
2076 if (!netif_running(sdata->dev)) 1540 if (!ieee80211_sdata_running(sdata))
2077 return; 1541 return;
2078 1542
2079 if (local->scanning) 1543 if (local->scanning)
@@ -2104,7 +1568,7 @@ static void ieee80211_sta_work(struct work_struct *work)
2104 ifmgd->associated) { 1568 ifmgd->associated) {
2105 u8 bssid[ETH_ALEN]; 1569 u8 bssid[ETH_ALEN];
2106 1570
2107 memcpy(bssid, ifmgd->associated->cbss.bssid, ETH_ALEN); 1571 memcpy(bssid, ifmgd->associated->bssid, ETH_ALEN);
2108 if (time_is_after_jiffies(ifmgd->probe_timeout)) 1572 if (time_is_after_jiffies(ifmgd->probe_timeout))
2109 run_again(ifmgd, ifmgd->probe_timeout); 1573 run_again(ifmgd, ifmgd->probe_timeout);
2110 1574
@@ -2126,7 +1590,7 @@ static void ieee80211_sta_work(struct work_struct *work)
2126 printk(KERN_DEBUG "No probe response from AP %pM" 1590 printk(KERN_DEBUG "No probe response from AP %pM"
2127 " after %dms, disconnecting.\n", 1591 " after %dms, disconnecting.\n",
2128 bssid, (1000 * IEEE80211_PROBE_WAIT)/HZ); 1592 bssid, (1000 * IEEE80211_PROBE_WAIT)/HZ);
2129 ieee80211_set_disassoc(sdata, true); 1593 ieee80211_set_disassoc(sdata);
2130 ieee80211_recalc_idle(local); 1594 ieee80211_recalc_idle(local);
2131 mutex_unlock(&ifmgd->mtx); 1595 mutex_unlock(&ifmgd->mtx);
2132 /* 1596 /*
@@ -2141,87 +1605,7 @@ static void ieee80211_sta_work(struct work_struct *work)
2141 } 1605 }
2142 } 1606 }
2143 1607
2144
2145 ieee80211_recalc_idle(local);
2146
2147 list_for_each_entry_safe(wk, tmp, &ifmgd->work_list, list) {
2148 if (time_is_after_jiffies(wk->timeout)) {
2149 /*
2150 * This work item isn't supposed to be worked on
2151 * right now, but take care to adjust the timer
2152 * properly.
2153 */
2154 run_again(ifmgd, wk->timeout);
2155 continue;
2156 }
2157
2158 switch (wk->state) {
2159 default:
2160 WARN_ON(1);
2161 /* fall through */
2162 case IEEE80211_MGD_STATE_IDLE:
2163 /* nothing */
2164 rma = RX_MGMT_NONE;
2165 break;
2166 case IEEE80211_MGD_STATE_PROBE:
2167 rma = ieee80211_direct_probe(sdata, wk);
2168 break;
2169 case IEEE80211_MGD_STATE_AUTH:
2170 rma = ieee80211_authenticate(sdata, wk);
2171 break;
2172 case IEEE80211_MGD_STATE_ASSOC:
2173 rma = ieee80211_associate(sdata, wk);
2174 break;
2175 }
2176
2177 switch (rma) {
2178 case RX_MGMT_NONE:
2179 /* no action required */
2180 break;
2181 case RX_MGMT_CFG80211_AUTH_TO:
2182 case RX_MGMT_CFG80211_ASSOC_TO:
2183 list_del(&wk->list);
2184 list_add(&wk->list, &free_work);
2185 wk->tries = rma; /* small abuse but only local */
2186 break;
2187 default:
2188 WARN(1, "unexpected: %d", rma);
2189 }
2190 }
2191
2192 list_for_each_entry(wk, &ifmgd->work_list, list) {
2193 if (wk->state != IEEE80211_MGD_STATE_IDLE) {
2194 anybusy = true;
2195 break;
2196 }
2197 }
2198 if (!anybusy &&
2199 test_and_clear_bit(IEEE80211_STA_REQ_SCAN, &ifmgd->request))
2200 ieee80211_queue_delayed_work(&local->hw,
2201 &local->scan_work,
2202 round_jiffies_relative(0));
2203
2204 mutex_unlock(&ifmgd->mtx); 1608 mutex_unlock(&ifmgd->mtx);
2205
2206 list_for_each_entry_safe(wk, tmp, &free_work, list) {
2207 switch (wk->tries) {
2208 case RX_MGMT_CFG80211_AUTH_TO:
2209 cfg80211_send_auth_timeout(sdata->dev,
2210 wk->bss->cbss.bssid);
2211 break;
2212 case RX_MGMT_CFG80211_ASSOC_TO:
2213 cfg80211_send_assoc_timeout(sdata->dev,
2214 wk->bss->cbss.bssid);
2215 break;
2216 default:
2217 WARN(1, "unexpected: %d", wk->tries);
2218 }
2219
2220 list_del(&wk->list);
2221 kfree(wk);
2222 }
2223
2224 ieee80211_recalc_idle(local);
2225} 1609}
2226 1610
2227static void ieee80211_sta_bcn_mon_timer(unsigned long data) 1611static void ieee80211_sta_bcn_mon_timer(unsigned long data)
@@ -2330,14 +1714,14 @@ void ieee80211_sta_setup_sdata(struct ieee80211_sub_if_data *sdata)
2330 (unsigned long) sdata); 1714 (unsigned long) sdata);
2331 skb_queue_head_init(&ifmgd->skb_queue); 1715 skb_queue_head_init(&ifmgd->skb_queue);
2332 1716
2333 INIT_LIST_HEAD(&ifmgd->work_list);
2334
2335 ifmgd->capab = WLAN_CAPABILITY_ESS;
2336 ifmgd->flags = 0; 1717 ifmgd->flags = 0;
2337 if (sdata->local->hw.queues >= 4)
2338 ifmgd->flags |= IEEE80211_STA_WMM_ENABLED;
2339 1718
2340 mutex_init(&ifmgd->mtx); 1719 mutex_init(&ifmgd->mtx);
1720
1721 if (sdata->local->hw.flags & IEEE80211_HW_SUPPORTS_DYNAMIC_SMPS)
1722 ifmgd->req_smps = IEEE80211_SMPS_AUTOMATIC;
1723 else
1724 ifmgd->req_smps = IEEE80211_SMPS_OFF;
2341} 1725}
2342 1726
2343/* scan finished notification */ 1727/* scan finished notification */
@@ -2368,12 +1752,34 @@ int ieee80211_max_network_latency(struct notifier_block *nb,
2368} 1752}
2369 1753
2370/* config hooks */ 1754/* config hooks */
1755static enum work_done_result
1756ieee80211_probe_auth_done(struct ieee80211_work *wk,
1757 struct sk_buff *skb)
1758{
1759 if (!skb) {
1760 cfg80211_send_auth_timeout(wk->sdata->dev, wk->filter_ta);
1761 return WORK_DONE_DESTROY;
1762 }
1763
1764 if (wk->type == IEEE80211_WORK_AUTH) {
1765 cfg80211_send_rx_auth(wk->sdata->dev, skb->data, skb->len);
1766 return WORK_DONE_DESTROY;
1767 }
1768
1769 mutex_lock(&wk->sdata->u.mgd.mtx);
1770 ieee80211_rx_mgmt_probe_resp(wk->sdata, skb);
1771 mutex_unlock(&wk->sdata->u.mgd.mtx);
1772
1773 wk->type = IEEE80211_WORK_AUTH;
1774 wk->probe_auth.tries = 0;
1775 return WORK_DONE_REQUEUE;
1776}
1777
2371int ieee80211_mgd_auth(struct ieee80211_sub_if_data *sdata, 1778int ieee80211_mgd_auth(struct ieee80211_sub_if_data *sdata,
2372 struct cfg80211_auth_request *req) 1779 struct cfg80211_auth_request *req)
2373{ 1780{
2374 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
2375 const u8 *ssid; 1781 const u8 *ssid;
2376 struct ieee80211_mgd_work *wk; 1782 struct ieee80211_work *wk;
2377 u16 auth_alg; 1783 u16 auth_alg;
2378 1784
2379 switch (req->auth_type) { 1785 switch (req->auth_type) {
@@ -2397,7 +1803,7 @@ int ieee80211_mgd_auth(struct ieee80211_sub_if_data *sdata,
2397 if (!wk) 1803 if (!wk)
2398 return -ENOMEM; 1804 return -ENOMEM;
2399 1805
2400 wk->bss = (void *)req->bss; 1806 memcpy(wk->filter_ta, req->bss->bssid, ETH_ALEN);
2401 1807
2402 if (req->ie && req->ie_len) { 1808 if (req->ie && req->ie_len) {
2403 memcpy(wk->ie, req->ie, req->ie_len); 1809 memcpy(wk->ie, req->ie, req->ie_len);
@@ -2405,66 +1811,76 @@ int ieee80211_mgd_auth(struct ieee80211_sub_if_data *sdata,
2405 } 1811 }
2406 1812
2407 if (req->key && req->key_len) { 1813 if (req->key && req->key_len) {
2408 wk->key_len = req->key_len; 1814 wk->probe_auth.key_len = req->key_len;
2409 wk->key_idx = req->key_idx; 1815 wk->probe_auth.key_idx = req->key_idx;
2410 memcpy(wk->key, req->key, req->key_len); 1816 memcpy(wk->probe_auth.key, req->key, req->key_len);
2411 } 1817 }
2412 1818
2413 ssid = ieee80211_bss_get_ie(req->bss, WLAN_EID_SSID); 1819 ssid = ieee80211_bss_get_ie(req->bss, WLAN_EID_SSID);
2414 memcpy(wk->ssid, ssid + 2, ssid[1]); 1820 memcpy(wk->probe_auth.ssid, ssid + 2, ssid[1]);
2415 wk->ssid_len = ssid[1]; 1821 wk->probe_auth.ssid_len = ssid[1];
2416 1822
2417 wk->state = IEEE80211_MGD_STATE_PROBE; 1823 wk->probe_auth.algorithm = auth_alg;
2418 wk->auth_alg = auth_alg; 1824 wk->probe_auth.privacy = req->bss->capability & WLAN_CAPABILITY_PRIVACY;
2419 wk->timeout = jiffies; /* run right away */
2420 1825
2421 /* 1826 wk->type = IEEE80211_WORK_DIRECT_PROBE;
2422 * XXX: if still associated need to tell AP that we're going 1827 wk->chan = req->bss->channel;
2423 * to sleep and then change channel etc. 1828 wk->sdata = sdata;
2424 */ 1829 wk->done = ieee80211_probe_auth_done;
2425 sdata->local->oper_channel = req->bss->channel;
2426 ieee80211_hw_config(sdata->local, 0);
2427
2428 mutex_lock(&ifmgd->mtx);
2429 list_add(&wk->list, &sdata->u.mgd.work_list);
2430 mutex_unlock(&ifmgd->mtx);
2431 1830
2432 ieee80211_queue_work(&sdata->local->hw, &sdata->u.mgd.work); 1831 ieee80211_add_work(wk);
2433 return 0; 1832 return 0;
2434} 1833}
2435 1834
2436int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata, 1835static enum work_done_result ieee80211_assoc_done(struct ieee80211_work *wk,
2437 struct cfg80211_assoc_request *req) 1836 struct sk_buff *skb)
2438{ 1837{
2439 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; 1838 struct ieee80211_mgmt *mgmt;
2440 struct ieee80211_mgd_work *wk, *found = NULL; 1839 u16 status;
2441 int i, err;
2442 1840
2443 mutex_lock(&ifmgd->mtx); 1841 if (!skb) {
1842 cfg80211_send_assoc_timeout(wk->sdata->dev, wk->filter_ta);
1843 return WORK_DONE_DESTROY;
1844 }
2444 1845
2445 list_for_each_entry(wk, &ifmgd->work_list, list) { 1846 mgmt = (void *)skb->data;
2446 if (&wk->bss->cbss == req->bss && 1847 status = le16_to_cpu(mgmt->u.assoc_resp.status_code);
2447 wk->state == IEEE80211_MGD_STATE_IDLE) { 1848
2448 found = wk; 1849 if (status == WLAN_STATUS_SUCCESS) {
2449 break; 1850 mutex_lock(&wk->sdata->u.mgd.mtx);
1851 if (!ieee80211_assoc_success(wk, mgmt, skb->len)) {
1852 mutex_unlock(&wk->sdata->u.mgd.mtx);
1853 /* oops -- internal error -- send timeout for now */
1854 cfg80211_send_assoc_timeout(wk->sdata->dev,
1855 wk->filter_ta);
1856 return WORK_DONE_DESTROY;
2450 } 1857 }
1858 mutex_unlock(&wk->sdata->u.mgd.mtx);
2451 } 1859 }
2452 1860
2453 if (!found) { 1861 cfg80211_send_rx_assoc(wk->sdata->dev, skb->data, skb->len);
2454 err = -ENOLINK; 1862 return WORK_DONE_DESTROY;
2455 goto out; 1863}
2456 }
2457 1864
2458 list_del(&found->list); 1865int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata,
1866 struct cfg80211_assoc_request *req)
1867{
1868 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
1869 struct ieee80211_bss *bss = (void *)req->bss->priv;
1870 struct ieee80211_work *wk;
1871 const u8 *ssid;
1872 int i;
2459 1873
2460 wk = krealloc(found, sizeof(*wk) + req->ie_len, GFP_KERNEL); 1874 mutex_lock(&ifmgd->mtx);
2461 if (!wk) { 1875 if (ifmgd->associated) {
2462 list_add(&found->list, &ifmgd->work_list); 1876 mutex_unlock(&ifmgd->mtx);
2463 err = -ENOMEM; 1877 return -EALREADY;
2464 goto out;
2465 } 1878 }
1879 mutex_unlock(&ifmgd->mtx);
2466 1880
2467 list_add(&wk->list, &ifmgd->work_list); 1881 wk = kzalloc(sizeof(*wk) + req->ie_len, GFP_KERNEL);
1882 if (!wk)
1883 return -ENOMEM;
2468 1884
2469 ifmgd->flags &= ~IEEE80211_STA_DISABLE_11N; 1885 ifmgd->flags &= ~IEEE80211_STA_DISABLE_11N;
2470 1886
@@ -2474,8 +1890,6 @@ int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata,
2474 req->crypto.ciphers_pairwise[i] == WLAN_CIPHER_SUITE_WEP104) 1890 req->crypto.ciphers_pairwise[i] == WLAN_CIPHER_SUITE_WEP104)
2475 ifmgd->flags |= IEEE80211_STA_DISABLE_11N; 1891 ifmgd->flags |= IEEE80211_STA_DISABLE_11N;
2476 1892
2477 sdata->local->oper_channel = req->bss->channel;
2478 ieee80211_hw_config(sdata->local, 0);
2479 1893
2480 if (req->ie && req->ie_len) { 1894 if (req->ie && req->ie_len) {
2481 memcpy(wk->ie, req->ie, req->ie_len); 1895 memcpy(wk->ie, req->ie, req->ie_len);
@@ -2483,12 +1897,55 @@ int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata,
2483 } else 1897 } else
2484 wk->ie_len = 0; 1898 wk->ie_len = 0;
2485 1899
1900 wk->assoc.bss = req->bss;
1901
1902 memcpy(wk->filter_ta, req->bss->bssid, ETH_ALEN);
1903
1904 /* new association always uses requested smps mode */
1905 if (ifmgd->req_smps == IEEE80211_SMPS_AUTOMATIC) {
1906 if (ifmgd->powersave)
1907 ifmgd->ap_smps = IEEE80211_SMPS_DYNAMIC;
1908 else
1909 ifmgd->ap_smps = IEEE80211_SMPS_OFF;
1910 } else
1911 ifmgd->ap_smps = ifmgd->req_smps;
1912
1913 wk->assoc.smps = ifmgd->ap_smps;
1914 /*
1915 * IEEE802.11n does not allow TKIP/WEP as pairwise ciphers in HT mode.
1916 * We still associate in non-HT mode (11a/b/g) if any one of these
1917 * ciphers is configured as pairwise.
1918 * We can set this to true for non-11n hardware, that'll be checked
1919 * separately along with the peer capabilities.
1920 */
1921 wk->assoc.use_11n = !(ifmgd->flags & IEEE80211_STA_DISABLE_11N);
1922 wk->assoc.capability = req->bss->capability;
1923 wk->assoc.wmm_used = bss->wmm_used;
1924 wk->assoc.supp_rates = bss->supp_rates;
1925 wk->assoc.supp_rates_len = bss->supp_rates_len;
1926 wk->assoc.ht_information_ie =
1927 ieee80211_bss_get_ie(req->bss, WLAN_EID_HT_INFORMATION);
1928
1929 if (bss->wmm_used && bss->uapsd_supported &&
1930 (sdata->local->hw.flags & IEEE80211_HW_SUPPORTS_UAPSD)) {
1931 wk->assoc.uapsd_used = true;
1932 ifmgd->flags |= IEEE80211_STA_UAPSD_ENABLED;
1933 } else {
1934 wk->assoc.uapsd_used = false;
1935 ifmgd->flags &= ~IEEE80211_STA_UAPSD_ENABLED;
1936 }
1937
1938 ssid = ieee80211_bss_get_ie(req->bss, WLAN_EID_SSID);
1939 memcpy(wk->assoc.ssid, ssid + 2, ssid[1]);
1940 wk->assoc.ssid_len = ssid[1];
1941
2486 if (req->prev_bssid) 1942 if (req->prev_bssid)
2487 memcpy(wk->prev_bssid, req->prev_bssid, ETH_ALEN); 1943 memcpy(wk->assoc.prev_bssid, req->prev_bssid, ETH_ALEN);
2488 1944
2489 wk->state = IEEE80211_MGD_STATE_ASSOC; 1945 wk->type = IEEE80211_WORK_ASSOC;
2490 wk->tries = 0; 1946 wk->chan = req->bss->channel;
2491 wk->timeout = jiffies; /* run right away */ 1947 wk->sdata = sdata;
1948 wk->done = ieee80211_assoc_done;
2492 1949
2493 if (req->use_mfp) { 1950 if (req->use_mfp) {
2494 ifmgd->mfp = IEEE80211_MFP_REQUIRED; 1951 ifmgd->mfp = IEEE80211_MFP_REQUIRED;
@@ -2503,69 +1960,59 @@ int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata,
2503 else 1960 else
2504 ifmgd->flags &= ~IEEE80211_STA_CONTROL_PORT; 1961 ifmgd->flags &= ~IEEE80211_STA_CONTROL_PORT;
2505 1962
2506 ieee80211_queue_work(&sdata->local->hw, &sdata->u.mgd.work); 1963 ieee80211_add_work(wk);
2507 1964 return 0;
2508 err = 0;
2509
2510 out:
2511 mutex_unlock(&ifmgd->mtx);
2512 return err;
2513} 1965}
2514 1966
2515int ieee80211_mgd_deauth(struct ieee80211_sub_if_data *sdata, 1967int ieee80211_mgd_deauth(struct ieee80211_sub_if_data *sdata,
2516 struct cfg80211_deauth_request *req, 1968 struct cfg80211_deauth_request *req,
2517 void *cookie) 1969 void *cookie)
2518{ 1970{
1971 struct ieee80211_local *local = sdata->local;
2519 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; 1972 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
2520 struct ieee80211_mgd_work *wk; 1973 struct ieee80211_work *wk;
2521 const u8 *bssid = NULL; 1974 const u8 *bssid = req->bss->bssid;
2522 bool not_auth_yet = false;
2523 1975
2524 mutex_lock(&ifmgd->mtx); 1976 mutex_lock(&ifmgd->mtx);
2525 1977
2526 if (ifmgd->associated && &ifmgd->associated->cbss == req->bss) { 1978 if (ifmgd->associated == req->bss) {
2527 bssid = req->bss->bssid; 1979 bssid = req->bss->bssid;
2528 ieee80211_set_disassoc(sdata, true); 1980 ieee80211_set_disassoc(sdata);
2529 } else list_for_each_entry(wk, &ifmgd->work_list, list) { 1981 mutex_unlock(&ifmgd->mtx);
2530 if (&wk->bss->cbss == req->bss) { 1982 } else {
2531 bssid = req->bss->bssid; 1983 bool not_auth_yet = false;
2532 if (wk->state == IEEE80211_MGD_STATE_PROBE) 1984
2533 not_auth_yet = true; 1985 mutex_unlock(&ifmgd->mtx);
1986
1987 mutex_lock(&local->work_mtx);
1988 list_for_each_entry(wk, &local->work_list, list) {
1989 if (wk->type != IEEE80211_WORK_DIRECT_PROBE)
1990 continue;
1991 if (memcmp(req->bss->bssid, wk->filter_ta, ETH_ALEN))
1992 continue;
1993 not_auth_yet = true;
2534 list_del(&wk->list); 1994 list_del(&wk->list);
2535 kfree(wk); 1995 free_work(wk);
2536 break; 1996 break;
2537 } 1997 }
2538 } 1998 mutex_unlock(&local->work_mtx);
2539
2540 /*
2541 * If somebody requests authentication and we haven't
2542 * sent out an auth frame yet there's no need to send
2543 * out a deauth frame either. If the state was PROBE,
2544 * then this is the case. If it's AUTH we have sent a
2545 * frame, and if it's IDLE we have completed the auth
2546 * process already.
2547 */
2548 if (not_auth_yet) {
2549 mutex_unlock(&ifmgd->mtx);
2550 __cfg80211_auth_canceled(sdata->dev, bssid);
2551 return 0;
2552 }
2553 1999
2554 /* 2000 /*
2555 * cfg80211 should catch this ... but it's racy since 2001 * If somebody requests authentication and we haven't
2556 * we can receive a deauth frame, process it, hand it 2002 * sent out an auth frame yet there's no need to send
2557 * to cfg80211 while that's in a locked section already 2003 * out a deauth frame either. If the state was PROBE,
2558 * trying to tell us that the user wants to disconnect. 2004 * then this is the case. If it's AUTH we have sent a
2559 */ 2005 * frame, and if it's IDLE we have completed the auth
2560 if (!bssid) { 2006 * process already.
2561 mutex_unlock(&ifmgd->mtx); 2007 */
2562 return -ENOLINK; 2008 if (not_auth_yet) {
2009 __cfg80211_auth_canceled(sdata->dev, bssid);
2010 return 0;
2011 }
2563 } 2012 }
2564 2013
2565 mutex_unlock(&ifmgd->mtx);
2566
2567 printk(KERN_DEBUG "%s: deauthenticating from %pM by local choice (reason=%d)\n", 2014 printk(KERN_DEBUG "%s: deauthenticating from %pM by local choice (reason=%d)\n",
2568 sdata->dev->name, bssid, req->reason_code); 2015 sdata->name, bssid, req->reason_code);
2569 2016
2570 ieee80211_send_deauth_disassoc(sdata, bssid, 2017 ieee80211_send_deauth_disassoc(sdata, bssid,
2571 IEEE80211_STYPE_DEAUTH, req->reason_code, 2018 IEEE80211_STYPE_DEAUTH, req->reason_code,
@@ -2590,15 +2037,15 @@ int ieee80211_mgd_disassoc(struct ieee80211_sub_if_data *sdata,
2590 * to cfg80211 while that's in a locked section already 2037 * to cfg80211 while that's in a locked section already
2591 * trying to tell us that the user wants to disconnect. 2038 * trying to tell us that the user wants to disconnect.
2592 */ 2039 */
2593 if (&ifmgd->associated->cbss != req->bss) { 2040 if (ifmgd->associated != req->bss) {
2594 mutex_unlock(&ifmgd->mtx); 2041 mutex_unlock(&ifmgd->mtx);
2595 return -ENOLINK; 2042 return -ENOLINK;
2596 } 2043 }
2597 2044
2598 printk(KERN_DEBUG "%s: disassociating from %pM by local choice (reason=%d)\n", 2045 printk(KERN_DEBUG "%s: disassociating from %pM by local choice (reason=%d)\n",
2599 sdata->dev->name, req->bss->bssid, req->reason_code); 2046 sdata->name, req->bss->bssid, req->reason_code);
2600 2047
2601 ieee80211_set_disassoc(sdata, false); 2048 ieee80211_set_disassoc(sdata);
2602 2049
2603 mutex_unlock(&ifmgd->mtx); 2050 mutex_unlock(&ifmgd->mtx);
2604 2051
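The mlme.c changes above replace the per-interface ifmgd->work_list state machine (its PROBE/AUTH/ASSOC states and per-work timeouts) with the generic ieee80211_work infrastructure: ieee80211_mgd_auth() and ieee80211_mgd_assoc() now fill in an ieee80211_work item and hand it to ieee80211_add_work(), and the outcome comes back through a "done" callback that receives either the matching response frame or NULL on timeout. A minimal sketch of that callback shape follows; the function name is hypothetical, while the types, return codes and cfg80211 helpers are the ones used in the hunks above.

/* illustrative only, not part of the patch; mirrors the done-callback
 * pattern of ieee80211_probe_auth_done()/ieee80211_assoc_done() */
static enum work_done_result
example_auth_done(struct ieee80211_work *wk, struct sk_buff *skb)
{
	if (!skb) {
		/* no response arrived before the work item timed out */
		cfg80211_send_auth_timeout(wk->sdata->dev, wk->filter_ta);
		return WORK_DONE_DESTROY;
	}

	/* a matching management frame was received; report it upwards */
	cfg80211_send_rx_auth(wk->sdata->dev, skb->data, skb->len);
	return WORK_DONE_DESTROY;
}

Returning WORK_DONE_REQUEUE instead, as ieee80211_probe_auth_done() does after a successful direct probe, keeps the same work item alive so it can continue as an IEEE80211_WORK_AUTH step.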
diff --git a/net/mac80211/offchannel.c b/net/mac80211/offchannel.c
new file mode 100644
index 00000000000..c36b1911987
--- /dev/null
+++ b/net/mac80211/offchannel.c
@@ -0,0 +1,170 @@
1/*
2 * Off-channel operation helpers
3 *
4 * Copyright 2003, Jouni Malinen <jkmaline@cc.hut.fi>
5 * Copyright 2004, Instant802 Networks, Inc.
6 * Copyright 2005, Devicescape Software, Inc.
7 * Copyright 2006-2007 Jiri Benc <jbenc@suse.cz>
8 * Copyright 2007, Michael Wu <flamingice@sourmilk.net>
9 * Copyright 2009 Johannes Berg <johannes@sipsolutions.net>
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License version 2 as
13 * published by the Free Software Foundation.
14 */
15#include <net/mac80211.h>
16#include "ieee80211_i.h"
17
18/*
19 * inform AP that we will go to sleep so that it will buffer the frames
20 * while we scan
21 */
22static void ieee80211_offchannel_ps_enable(struct ieee80211_sub_if_data *sdata)
23{
24 struct ieee80211_local *local = sdata->local;
25
26 local->offchannel_ps_enabled = false;
27
28 /* FIXME: what to do when local->pspolling is true? */
29
30 del_timer_sync(&local->dynamic_ps_timer);
31 cancel_work_sync(&local->dynamic_ps_enable_work);
32
33 if (local->hw.conf.flags & IEEE80211_CONF_PS) {
34 local->offchannel_ps_enabled = true;
35 local->hw.conf.flags &= ~IEEE80211_CONF_PS;
36 ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_PS);
37 }
38
39 if (!(local->offchannel_ps_enabled) ||
40 !(local->hw.flags & IEEE80211_HW_PS_NULLFUNC_STACK))
41 /*
42 * If power save was enabled, no need to send a nullfunc
43 * frame because AP knows that we are sleeping. But if the
44 * hardware is creating the nullfunc frame for power save
45 * status (ie. IEEE80211_HW_PS_NULLFUNC_STACK is not
46 * enabled) and power save was enabled, the firmware just
47 * sent a null frame with power save disabled. So we need
48 * to send a new nullfunc frame to inform the AP that we
49 * are again sleeping.
50 */
51 ieee80211_send_nullfunc(local, sdata, 1);
52}
53
54/* inform AP that we are awake again, unless power save is enabled */
55static void ieee80211_offchannel_ps_disable(struct ieee80211_sub_if_data *sdata)
56{
57 struct ieee80211_local *local = sdata->local;
58
59 if (!local->ps_sdata)
60 ieee80211_send_nullfunc(local, sdata, 0);
61 else if (local->offchannel_ps_enabled) {
62 /*
63 * In !IEEE80211_HW_PS_NULLFUNC_STACK case the hardware
64 * will send a nullfunc frame with the powersave bit set
65 * even though the AP already knows that we are sleeping.
66 * This could be avoided by sending a null frame with power
67 * save bit disabled before enabling the power save, but
68 * this doesn't gain anything.
69 *
70 * When IEEE80211_HW_PS_NULLFUNC_STACK is enabled, no need
71 * to send a nullfunc frame because AP already knows that
72 * we are sleeping, let's just enable power save mode in
73 * hardware.
74 */
75 local->hw.conf.flags |= IEEE80211_CONF_PS;
76 ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_PS);
77 } else if (local->hw.conf.dynamic_ps_timeout > 0) {
78 /*
79 * If IEEE80211_CONF_PS was not set and the dynamic_ps_timer
80 * had been running before leaving the operating channel,
81 * restart the timer now and send a nullfunc frame to inform
82 * the AP that we are awake.
83 */
84 ieee80211_send_nullfunc(local, sdata, 0);
85 mod_timer(&local->dynamic_ps_timer, jiffies +
86 msecs_to_jiffies(local->hw.conf.dynamic_ps_timeout));
87 }
88}
89
90void ieee80211_offchannel_stop_beaconing(struct ieee80211_local *local)
91{
92 struct ieee80211_sub_if_data *sdata;
93
94 mutex_lock(&local->iflist_mtx);
95 list_for_each_entry(sdata, &local->interfaces, list) {
96 if (!ieee80211_sdata_running(sdata))
97 continue;
98
99 /* disable beaconing */
100 if (sdata->vif.type == NL80211_IFTYPE_AP ||
101 sdata->vif.type == NL80211_IFTYPE_ADHOC ||
102 sdata->vif.type == NL80211_IFTYPE_MESH_POINT)
103 ieee80211_bss_info_change_notify(
104 sdata, BSS_CHANGED_BEACON_ENABLED);
105
106 /*
107 * only handle non-STA interfaces here, STA interfaces
108 * are handled in ieee80211_offchannel_stop_station(),
109 * e.g., from the background scan state machine.
110 *
111 * In addition, do not stop monitor interface to allow it to be
112 * used from user space controlled off-channel operations.
113 */
114 if (sdata->vif.type != NL80211_IFTYPE_STATION &&
115 sdata->vif.type != NL80211_IFTYPE_MONITOR)
116 netif_tx_stop_all_queues(sdata->dev);
117 }
118 mutex_unlock(&local->iflist_mtx);
119}
120
121void ieee80211_offchannel_stop_station(struct ieee80211_local *local)
122{
123 struct ieee80211_sub_if_data *sdata;
124
125 /*
126 * notify the AP about us leaving the channel and stop all STA interfaces
127 */
128 mutex_lock(&local->iflist_mtx);
129 list_for_each_entry(sdata, &local->interfaces, list) {
130 if (!ieee80211_sdata_running(sdata))
131 continue;
132
133 if (sdata->vif.type == NL80211_IFTYPE_STATION) {
134 netif_tx_stop_all_queues(sdata->dev);
135 if (sdata->u.mgd.associated)
136 ieee80211_offchannel_ps_enable(sdata);
137 }
138 }
139 mutex_unlock(&local->iflist_mtx);
140}
141
142void ieee80211_offchannel_return(struct ieee80211_local *local,
143 bool enable_beaconing)
144{
145 struct ieee80211_sub_if_data *sdata;
146
147 mutex_lock(&local->iflist_mtx);
148 list_for_each_entry(sdata, &local->interfaces, list) {
149 if (!ieee80211_sdata_running(sdata))
150 continue;
151
152 /* Tell AP we're back */
153 if (sdata->vif.type == NL80211_IFTYPE_STATION) {
154 if (sdata->u.mgd.associated)
155 ieee80211_offchannel_ps_disable(sdata);
156 }
157
158 if (sdata->vif.type != NL80211_IFTYPE_MONITOR)
159 netif_tx_wake_all_queues(sdata->dev);
160
161 /* re-enable beaconing */
162 if (enable_beaconing &&
163 (sdata->vif.type == NL80211_IFTYPE_AP ||
164 sdata->vif.type == NL80211_IFTYPE_ADHOC ||
165 sdata->vif.type == NL80211_IFTYPE_MESH_POINT))
166 ieee80211_bss_info_change_notify(
167 sdata, BSS_CHANGED_BEACON_ENABLED);
168 }
169 mutex_unlock(&local->iflist_mtx);
170}
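ieee80211_offchannel_ps_enable() above only transmits a nullfunc frame when the AP could otherwise get the wrong idea about our power-save state. A compact restatement of that condition, as a sketch; the helper name is hypothetical and not part of the new file.

/* illustrative only: the condition under which the function above
 * sends a nullfunc (PM bit set) before leaving the operating channel */
static bool offchannel_nullfunc_needed(struct ieee80211_local *local)
{
	/*
	 * Only when power save was already enabled and mac80211 itself
	 * builds the nullfunc frames (IEEE80211_HW_PS_NULLFUNC_STACK)
	 * does the AP already believe we are asleep; in every other
	 * case a nullfunc with the PM bit set is sent so the AP
	 * buffers our frames while we are off-channel.
	 */
	return !local->offchannel_ps_enabled ||
	       !(local->hw.flags & IEEE80211_HW_PS_NULLFUNC_STACK);
}

ieee80211_offchannel_ps_disable() mirrors this on return: it either re-enables IEEE80211_CONF_PS in hardware or sends a nullfunc with the PM bit cleared and rearms the dynamic PS timer.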
diff --git a/net/mac80211/pm.c b/net/mac80211/pm.c
index e535f1c988f..47f818959ad 100644
--- a/net/mac80211/pm.c
+++ b/net/mac80211/pm.c
@@ -10,7 +10,6 @@ int __ieee80211_suspend(struct ieee80211_hw *hw)
10{ 10{
11 struct ieee80211_local *local = hw_to_local(hw); 11 struct ieee80211_local *local = hw_to_local(hw);
12 struct ieee80211_sub_if_data *sdata; 12 struct ieee80211_sub_if_data *sdata;
13 struct ieee80211_if_init_conf conf;
14 struct sta_info *sta; 13 struct sta_info *sta;
15 unsigned long flags; 14 unsigned long flags;
16 15
@@ -65,7 +64,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw)
65 struct ieee80211_sub_if_data, 64 struct ieee80211_sub_if_data,
66 u.ap); 65 u.ap);
67 66
68 drv_sta_notify(local, &sdata->vif, STA_NOTIFY_REMOVE, 67 drv_sta_notify(local, sdata, STA_NOTIFY_REMOVE,
69 &sta->sta); 68 &sta->sta);
70 } 69 }
71 70
@@ -93,17 +92,14 @@ int __ieee80211_suspend(struct ieee80211_hw *hw)
93 break; 92 break;
94 } 93 }
95 94
96 if (!netif_running(sdata->dev)) 95 if (!ieee80211_sdata_running(sdata))
97 continue; 96 continue;
98 97
99 /* disable beaconing */ 98 /* disable beaconing */
100 ieee80211_bss_info_change_notify(sdata, 99 ieee80211_bss_info_change_notify(sdata,
101 BSS_CHANGED_BEACON_ENABLED); 100 BSS_CHANGED_BEACON_ENABLED);
102 101
103 conf.vif = &sdata->vif; 102 drv_remove_interface(local, &sdata->vif);
104 conf.type = sdata->vif.type;
105 conf.mac_addr = sdata->dev->dev_addr;
106 drv_remove_interface(local, &conf);
107 } 103 }
108 104
109 /* stop hardware - this must stop RX */ 105 /* stop hardware - this must stop RX */
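The suspend path above also picks up the reworked driver-ops signatures: drv_sta_notify() now takes the sdata rather than a bare vif pointer, and drv_remove_interface() takes &sdata->vif directly, so the temporary ieee80211_if_init_conf is gone. Condensed from the hunk above (the preceding per-type handling is omitted), the per-interface teardown now reads roughly as:

	list_for_each_entry(sdata, &local->interfaces, list) {
		if (!ieee80211_sdata_running(sdata))
			continue;

		/* disable beaconing, then detach the vif from the driver */
		ieee80211_bss_info_change_notify(sdata,
						 BSS_CHANGED_BEACON_ENABLED);
		drv_remove_interface(local, &sdata->vif);
	}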
diff --git a/net/mac80211/rate.c b/net/mac80211/rate.c
index b9007f80cb9..c74b7c85403 100644
--- a/net/mac80211/rate.c
+++ b/net/mac80211/rate.c
@@ -207,6 +207,27 @@ static bool rc_no_data_or_no_ack(struct ieee80211_tx_rate_control *txrc)
207 return ((info->flags & IEEE80211_TX_CTL_NO_ACK) || !ieee80211_is_data(fc)); 207 return ((info->flags & IEEE80211_TX_CTL_NO_ACK) || !ieee80211_is_data(fc));
208} 208}
209 209
210static void rc_send_low_broadcast(s8 *idx, u32 basic_rates, u8 max_rate_idx)
211{
212 u8 i;
213
214 if (basic_rates == 0)
215 return; /* assume basic rates unknown and accept rate */
216 if (*idx < 0)
217 return;
218 if (basic_rates & (1 << *idx))
219 return; /* selected rate is a basic rate */
220
221 for (i = *idx + 1; i <= max_rate_idx; i++) {
222 if (basic_rates & (1 << i)) {
223 *idx = i;
224 return;
225 }
226 }
227
228 /* could not find a basic rate; use original selection */
229}
230
210bool rate_control_send_low(struct ieee80211_sta *sta, 231bool rate_control_send_low(struct ieee80211_sta *sta,
211 void *priv_sta, 232 void *priv_sta,
212 struct ieee80211_tx_rate_control *txrc) 233 struct ieee80211_tx_rate_control *txrc)
@@ -218,12 +239,48 @@ bool rate_control_send_low(struct ieee80211_sta *sta,
218 info->control.rates[0].count = 239 info->control.rates[0].count =
219 (info->flags & IEEE80211_TX_CTL_NO_ACK) ? 240 (info->flags & IEEE80211_TX_CTL_NO_ACK) ?
220 1 : txrc->hw->max_rate_tries; 241 1 : txrc->hw->max_rate_tries;
242 if (!sta && txrc->ap)
243 rc_send_low_broadcast(&info->control.rates[0].idx,
244 txrc->bss_conf->basic_rates,
245 txrc->sband->n_bitrates);
221 return true; 246 return true;
222 } 247 }
223 return false; 248 return false;
224} 249}
225EXPORT_SYMBOL(rate_control_send_low); 250EXPORT_SYMBOL(rate_control_send_low);
226 251
252static void rate_idx_match_mask(struct ieee80211_tx_rate *rate,
253 int n_bitrates, u32 mask)
254{
255 int j;
256
257 /* See whether the selected rate or anything below it is allowed. */
258 for (j = rate->idx; j >= 0; j--) {
259 if (mask & (1 << j)) {
260 /* Okay, found a suitable rate. Use it. */
261 rate->idx = j;
262 return;
263 }
264 }
265
266 /* Try to find a higher rate that would be allowed */
267 for (j = rate->idx + 1; j < n_bitrates; j++) {
268 if (mask & (1 << j)) {
269 /* Okay, found a suitable rate. Use it. */
270 rate->idx = j;
271 return;
272 }
273 }
274
275 /*
276 * Uh.. No suitable rate exists. This should not really happen with
277 * sane TX rate mask configurations. However, should someone manage to
278 * configure supported rates and TX rate mask in incompatible way,
279 * allow the frame to be transmitted with whatever the rate control
280 * selected.
281 */
282}
283
227void rate_control_get_rate(struct ieee80211_sub_if_data *sdata, 284void rate_control_get_rate(struct ieee80211_sub_if_data *sdata,
228 struct sta_info *sta, 285 struct sta_info *sta,
229 struct ieee80211_tx_rate_control *txrc) 286 struct ieee80211_tx_rate_control *txrc)
@@ -233,6 +290,7 @@ void rate_control_get_rate(struct ieee80211_sub_if_data *sdata,
233 struct ieee80211_sta *ista = NULL; 290 struct ieee80211_sta *ista = NULL;
234 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(txrc->skb); 291 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(txrc->skb);
235 int i; 292 int i;
293 u32 mask;
236 294
237 if (sta) { 295 if (sta) {
238 ista = &sta->sta; 296 ista = &sta->sta;
@@ -245,23 +303,31 @@ void rate_control_get_rate(struct ieee80211_sub_if_data *sdata,
245 info->control.rates[i].count = 1; 303 info->control.rates[i].count = 1;
246 } 304 }
247 305
248 if (sta && sdata->force_unicast_rateidx > -1) { 306 ref->ops->get_rate(ref->priv, ista, priv_sta, txrc);
249 info->control.rates[0].idx = sdata->force_unicast_rateidx;
250 } else {
251 ref->ops->get_rate(ref->priv, ista, priv_sta, txrc);
252 info->flags |= IEEE80211_TX_INTFL_RCALGO;
253 }
254 307
255 /* 308 /*
256 * try to enforce the maximum rate the user wanted 309 * Try to enforce the rateidx mask the user wanted. skip this if the
310 * default mask (allow all rates) is used to save some processing for
311 * the common case.
257 */ 312 */
258 if (sdata->max_ratectrl_rateidx > -1) 313 mask = sdata->rc_rateidx_mask[info->band];
314 if (mask != (1 << txrc->sband->n_bitrates) - 1) {
315 if (sta) {
316 /* Filter out rates that the STA does not support */
317 mask &= sta->sta.supp_rates[info->band];
318 }
319 /*
320 * Make sure the rate index selected for each TX rate is
321 * included in the configured mask and change the rate indexes
322 * if needed.
323 */
259 for (i = 0; i < IEEE80211_TX_MAX_RATES; i++) { 324 for (i = 0; i < IEEE80211_TX_MAX_RATES; i++) {
325 /* Rate masking supports only legacy rates for now */
260 if (info->control.rates[i].flags & IEEE80211_TX_RC_MCS) 326 if (info->control.rates[i].flags & IEEE80211_TX_RC_MCS)
261 continue; 327 continue;
262 info->control.rates[i].idx = 328 rate_idx_match_mask(&info->control.rates[i],
263 min_t(s8, info->control.rates[i].idx, 329 txrc->sband->n_bitrates, mask);
264 sdata->max_ratectrl_rateidx); 330 }
265 } 331 }
266 332
267 BUG_ON(info->control.rates[0].idx < 0); 333 BUG_ON(info->control.rates[0].idx < 0);
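The rate.c hunks replace the old force_unicast_rateidx / max_ratectrl_rateidx clamping with a per-band bitmask (sdata->rc_rateidx_mask[band]): the mask is skipped entirely when it allows all rates, is intersected with the peer's supported rates, and each selected legacy rate index is then remapped by rate_idx_match_mask(). The matching rule prefers the highest allowed rate at or below the rate control's choice, then the lowest allowed rate above it, and otherwise leaves the choice untouched. A small self-contained sketch of that rule on plain integers (illustrative only, not kernel code):

#include <stdio.h>

/* models rate_idx_match_mask() above: highest allowed index at or
 * below the selection, else the lowest allowed index above it, else
 * keep the original selection */
static int match_mask(int idx, int n_bitrates, unsigned int mask)
{
	int j;

	for (j = idx; j >= 0; j--)
		if (mask & (1u << j))
			return j;
	for (j = idx + 1; j < n_bitrates; j++)
		if (mask & (1u << j))
			return j;
	return idx;	/* incompatible mask: keep rate control's choice */
}

int main(void)
{
	/* 12 legacy rates, user mask allowing only indexes 2, 5 and 7 */
	unsigned int mask = (1u << 2) | (1u << 5) | (1u << 7);

	printf("%d\n", match_mask(6, 12, mask));	/* prints 5 */
	printf("%d\n", match_mask(1, 12, mask));	/* prints 2 */
	printf("%d\n", match_mask(9, 12, mask));	/* prints 7 */
	return 0;
}

With that mask, a selection of 6 is pulled down to 5, a selection of 1 is pushed up to 2, and a selection of 9 is pulled down to 7; only an empty effective mask leaves the original index in place.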
diff --git a/net/mac80211/rate.h b/net/mac80211/rate.h
index cb9bd1f65e2..669dddd4052 100644
--- a/net/mac80211/rate.h
+++ b/net/mac80211/rate.h
@@ -44,10 +44,7 @@ static inline void rate_control_tx_status(struct ieee80211_local *local,
44 struct rate_control_ref *ref = local->rate_ctrl; 44 struct rate_control_ref *ref = local->rate_ctrl;
45 struct ieee80211_sta *ista = &sta->sta; 45 struct ieee80211_sta *ista = &sta->sta;
46 void *priv_sta = sta->rate_ctrl_priv; 46 void *priv_sta = sta->rate_ctrl_priv;
47 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); 47 ref->ops->tx_status(ref->priv, sband, ista, priv_sta, skb);
48
49 if (likely(info->flags & IEEE80211_TX_INTFL_RCALGO))
50 ref->ops->tx_status(ref->priv, sband, ista, priv_sta, skb);
51} 48}
52 49
53 50
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index 82a30c1bf3a..a8e15b84c05 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -283,15 +283,15 @@ ieee80211_rx_monitor(struct ieee80211_local *local, struct sk_buff *origskb,
283 skb->protocol = htons(ETH_P_802_2); 283 skb->protocol = htons(ETH_P_802_2);
284 284
285 list_for_each_entry_rcu(sdata, &local->interfaces, list) { 285 list_for_each_entry_rcu(sdata, &local->interfaces, list) {
286 if (!netif_running(sdata->dev))
287 continue;
288
289 if (sdata->vif.type != NL80211_IFTYPE_MONITOR) 286 if (sdata->vif.type != NL80211_IFTYPE_MONITOR)
290 continue; 287 continue;
291 288
292 if (sdata->u.mntr_flags & MONITOR_FLAG_COOK_FRAMES) 289 if (sdata->u.mntr_flags & MONITOR_FLAG_COOK_FRAMES)
293 continue; 290 continue;
294 291
292 if (!ieee80211_sdata_running(sdata))
293 continue;
294
295 if (prev_dev) { 295 if (prev_dev) {
296 skb2 = skb_clone(skb, GFP_ATOMIC); 296 skb2 = skb_clone(skb, GFP_ATOMIC);
297 if (skb2) { 297 if (skb2) {
@@ -361,7 +361,9 @@ static void ieee80211_parse_qos(struct ieee80211_rx_data *rx)
361 * boundary. In the case of regular frames, this simply means aligning the 361 * boundary. In the case of regular frames, this simply means aligning the
362 * payload to a four-byte boundary (because either the IP header is directly 362 * payload to a four-byte boundary (because either the IP header is directly
363 * contained, or IV/RFC1042 headers that have a length divisible by four are 363 * contained, or IV/RFC1042 headers that have a length divisible by four are
364 * in front of it). 364 * in front of it). If the payload data is not properly aligned and the
365 * architecture doesn't support efficient unaligned operations, mac80211
366 * will align the data.
365 * 367 *
366 * With A-MSDU frames, however, the payload data address must yield two modulo 368 * With A-MSDU frames, however, the payload data address must yield two modulo
367 * four because there are 14-byte 802.3 headers within the A-MSDU frames that 369 * four because there are 14-byte 802.3 headers within the A-MSDU frames that
@@ -375,25 +377,10 @@ static void ieee80211_parse_qos(struct ieee80211_rx_data *rx)
375 */ 377 */
376static void ieee80211_verify_alignment(struct ieee80211_rx_data *rx) 378static void ieee80211_verify_alignment(struct ieee80211_rx_data *rx)
377{ 379{
378 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data; 380#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
379 int hdrlen; 381 WARN_ONCE((unsigned long)rx->skb->data & 1,
380 382 "unaligned packet at 0x%p\n", rx->skb->data);
381#ifndef CONFIG_MAC80211_DEBUG_PACKET_ALIGNMENT
382 return;
383#endif 383#endif
384
385 if (WARN_ONCE((unsigned long)rx->skb->data & 1,
386 "unaligned packet at 0x%p\n", rx->skb->data))
387 return;
388
389 if (!ieee80211_is_data_present(hdr->frame_control))
390 return;
391
392 hdrlen = ieee80211_hdrlen(hdr->frame_control);
393 if (rx->flags & IEEE80211_RX_AMSDU)
394 hdrlen += ETH_HLEN;
395 WARN_ONCE(((unsigned long)(rx->skb->data + hdrlen)) & 3,
396 "unaligned IP payload at 0x%p\n", rx->skb->data + hdrlen);
397} 384}
398 385
399 386
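The comment in the hunk above spells out the RX alignment contract: regular data frames want the IP payload on a four-byte boundary, while A-MSDU subframes want their 14-byte 802.3 header to start at two modulo four so the encapsulated payload ends up aligned; when that cannot be guaranteed and the CPU lacks efficient unaligned access, mac80211 now shifts the data itself, and the old DEBUG_PACKET_ALIGNMENT checks collapse into a single WARN_ONCE. The arithmetic behind the "two modulo four" rule, as a stand-alone illustration (not kernel code):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	unsigned char buf[64] __attribute__((aligned(4)));
	unsigned int start;

	/* a 14-byte Ethernet/802.3 header leaves the payload 4-byte
	 * aligned exactly when the header starts at offset 2 mod 4 */
	for (start = 0; start < 4; start++) {
		uintptr_t hdr = (uintptr_t)(buf + start);
		uintptr_t payload = hdr + 14;

		printf("header offset %u -> payload mod 4 = %lu\n",
		       start, (unsigned long)(payload & 3));
	}
	return 0;
}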
@@ -476,7 +463,7 @@ ieee80211_rx_mesh_check(struct ieee80211_rx_data *rx)
476{ 463{
477 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data; 464 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
478 unsigned int hdrlen = ieee80211_hdrlen(hdr->frame_control); 465 unsigned int hdrlen = ieee80211_hdrlen(hdr->frame_control);
479 char *dev_addr = rx->sdata->dev->dev_addr; 466 char *dev_addr = rx->sdata->vif.addr;
480 467
481 if (ieee80211_is_data(hdr->frame_control)) { 468 if (ieee80211_is_data(hdr->frame_control)) {
482 if (is_multicast_ether_addr(hdr->addr1)) { 469 if (is_multicast_ether_addr(hdr->addr1)) {
@@ -1021,10 +1008,10 @@ static void ap_sta_ps_start(struct sta_info *sta)
1021 1008
1022 atomic_inc(&sdata->bss->num_sta_ps); 1009 atomic_inc(&sdata->bss->num_sta_ps);
1023 set_sta_flags(sta, WLAN_STA_PS_STA); 1010 set_sta_flags(sta, WLAN_STA_PS_STA);
1024 drv_sta_notify(local, &sdata->vif, STA_NOTIFY_SLEEP, &sta->sta); 1011 drv_sta_notify(local, sdata, STA_NOTIFY_SLEEP, &sta->sta);
1025#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG 1012#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG
1026 printk(KERN_DEBUG "%s: STA %pM aid %d enters power save mode\n", 1013 printk(KERN_DEBUG "%s: STA %pM aid %d enters power save mode\n",
1027 sdata->dev->name, sta->sta.addr, sta->sta.aid); 1014 sdata->name, sta->sta.addr, sta->sta.aid);
1028#endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */ 1015#endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */
1029} 1016}
1030 1017
@@ -1038,13 +1025,13 @@ static void ap_sta_ps_end(struct sta_info *sta)
1038 1025
1039#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG 1026#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG
1040 printk(KERN_DEBUG "%s: STA %pM aid %d exits power save mode\n", 1027 printk(KERN_DEBUG "%s: STA %pM aid %d exits power save mode\n",
1041 sdata->dev->name, sta->sta.addr, sta->sta.aid); 1028 sdata->name, sta->sta.addr, sta->sta.aid);
1042#endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */ 1029#endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */
1043 1030
1044 if (test_sta_flags(sta, WLAN_STA_PS_DRIVER)) { 1031 if (test_sta_flags(sta, WLAN_STA_PS_DRIVER)) {
1045#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG 1032#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG
1046 printk(KERN_DEBUG "%s: STA %pM aid %d driver-ps-blocked\n", 1033 printk(KERN_DEBUG "%s: STA %pM aid %d driver-ps-blocked\n",
1047 sdata->dev->name, sta->sta.addr, sta->sta.aid); 1034 sdata->name, sta->sta.addr, sta->sta.aid);
1048#endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */ 1035#endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */
1049 return; 1036 return;
1050 } 1037 }
@@ -1124,6 +1111,18 @@ ieee80211_rx_h_sta_process(struct ieee80211_rx_data *rx)
1124 if (ieee80211_is_nullfunc(hdr->frame_control) || 1111 if (ieee80211_is_nullfunc(hdr->frame_control) ||
1125 ieee80211_is_qos_nullfunc(hdr->frame_control)) { 1112 ieee80211_is_qos_nullfunc(hdr->frame_control)) {
1126 I802_DEBUG_INC(rx->local->rx_handlers_drop_nullfunc); 1113 I802_DEBUG_INC(rx->local->rx_handlers_drop_nullfunc);
1114
1115 /*
1116 * If we receive a 4-addr nullfunc frame from a STA
1117 * that was not moved to a 4-addr STA vlan yet, drop
1118 * the frame to the monitor interface, to make sure
1119 * that hostapd sees it
1120 */
1121 if (ieee80211_has_a4(hdr->frame_control) &&
1122 (rx->sdata->vif.type == NL80211_IFTYPE_AP ||
1123 (rx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN &&
1124 !rx->sdata->u.vlan.sta)))
1125 return RX_DROP_MONITOR;
1127 /* 1126 /*
1128 * Update counter and free packet here to avoid 1127 * Update counter and free packet here to avoid
1129 * counting this as a dropped packed. 1128 * counting this as a dropped packed.
@@ -1156,7 +1155,7 @@ ieee80211_reassemble_add(struct ieee80211_sub_if_data *sdata,
1156 printk(KERN_DEBUG "%s: RX reassembly removed oldest " 1155 printk(KERN_DEBUG "%s: RX reassembly removed oldest "
1157 "fragment entry (idx=%d age=%lu seq=%d last_frag=%d " 1156 "fragment entry (idx=%d age=%lu seq=%d last_frag=%d "
1158 "addr1=%pM addr2=%pM\n", 1157 "addr1=%pM addr2=%pM\n",
1159 sdata->dev->name, idx, 1158 sdata->name, idx,
1160 jiffies - entry->first_frag_time, entry->seq, 1159 jiffies - entry->first_frag_time, entry->seq,
1161 entry->last_frag, hdr->addr1, hdr->addr2); 1160 entry->last_frag, hdr->addr1, hdr->addr2);
1162#endif 1161#endif
@@ -1424,7 +1423,6 @@ static int
1424__ieee80211_data_to_8023(struct ieee80211_rx_data *rx) 1423__ieee80211_data_to_8023(struct ieee80211_rx_data *rx)
1425{ 1424{
1426 struct ieee80211_sub_if_data *sdata = rx->sdata; 1425 struct ieee80211_sub_if_data *sdata = rx->sdata;
1427 struct net_device *dev = sdata->dev;
1428 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data; 1426 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
1429 1427
1430 if (ieee80211_has_a4(hdr->frame_control) && 1428 if (ieee80211_has_a4(hdr->frame_control) &&
@@ -1436,7 +1434,7 @@ __ieee80211_data_to_8023(struct ieee80211_rx_data *rx)
1436 (sdata->vif.type == NL80211_IFTYPE_STATION && sdata->u.mgd.use_4addr))) 1434 (sdata->vif.type == NL80211_IFTYPE_STATION && sdata->u.mgd.use_4addr)))
1437 return -1; 1435 return -1;
1438 1436
1439 return ieee80211_data_to_8023(rx->skb, dev->dev_addr, sdata->vif.type); 1437 return ieee80211_data_to_8023(rx->skb, sdata->vif.addr, sdata->vif.type);
1440} 1438}
1441 1439
1442/* 1440/*
@@ -1453,7 +1451,7 @@ static bool ieee80211_frame_allowed(struct ieee80211_rx_data *rx, __le16 fc)
1453 * of whether the frame was encrypted or not. 1451 * of whether the frame was encrypted or not.
1454 */ 1452 */
1455 if (ehdr->h_proto == htons(ETH_P_PAE) && 1453 if (ehdr->h_proto == htons(ETH_P_PAE) &&
1456 (compare_ether_addr(ehdr->h_dest, rx->sdata->dev->dev_addr) == 0 || 1454 (compare_ether_addr(ehdr->h_dest, rx->sdata->vif.addr) == 0 ||
1457 compare_ether_addr(ehdr->h_dest, pae_group_addr) == 0)) 1455 compare_ether_addr(ehdr->h_dest, pae_group_addr) == 0))
1458 return true; 1456 return true;
1459 1457
@@ -1472,7 +1470,6 @@ ieee80211_deliver_skb(struct ieee80211_rx_data *rx)
1472{ 1470{
1473 struct ieee80211_sub_if_data *sdata = rx->sdata; 1471 struct ieee80211_sub_if_data *sdata = rx->sdata;
1474 struct net_device *dev = sdata->dev; 1472 struct net_device *dev = sdata->dev;
1475 struct ieee80211_local *local = rx->local;
1476 struct sk_buff *skb, *xmit_skb; 1473 struct sk_buff *skb, *xmit_skb;
1477 struct ethhdr *ehdr = (struct ethhdr *) rx->skb->data; 1474 struct ethhdr *ehdr = (struct ethhdr *) rx->skb->data;
1478 struct sta_info *dsta; 1475 struct sta_info *dsta;
@@ -1495,8 +1492,8 @@ ieee80211_deliver_skb(struct ieee80211_rx_data *rx)
1495 printk(KERN_DEBUG "%s: failed to clone " 1492 printk(KERN_DEBUG "%s: failed to clone "
1496 "multicast frame\n", dev->name); 1493 "multicast frame\n", dev->name);
1497 } else { 1494 } else {
1498 dsta = sta_info_get(local, skb->data); 1495 dsta = sta_info_get(sdata, skb->data);
1499 if (dsta && dsta->sdata->dev == dev) { 1496 if (dsta) {
1500 /* 1497 /*
1501 * The destination station is associated to 1498 * The destination station is associated to
1502 * this AP (in this VLAN), so send the frame 1499 * this AP (in this VLAN), so send the frame
@@ -1512,7 +1509,7 @@ ieee80211_deliver_skb(struct ieee80211_rx_data *rx)
1512 if (skb) { 1509 if (skb) {
1513 int align __maybe_unused; 1510 int align __maybe_unused;
1514 1511
1515#if defined(CONFIG_MAC80211_DEBUG_PACKET_ALIGNMENT) || !defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) 1512#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
1516 /* 1513 /*
1517 * 'align' will only take the values 0 or 2 here 1514 * 'align' will only take the values 0 or 2 here
1518 * since all frames are required to be aligned 1515 * since all frames are required to be aligned
@@ -1556,16 +1553,10 @@ static ieee80211_rx_result debug_noinline
1556ieee80211_rx_h_amsdu(struct ieee80211_rx_data *rx) 1553ieee80211_rx_h_amsdu(struct ieee80211_rx_data *rx)
1557{ 1554{
1558 struct net_device *dev = rx->sdata->dev; 1555 struct net_device *dev = rx->sdata->dev;
1559 struct ieee80211_local *local = rx->local; 1556 struct sk_buff *skb = rx->skb;
1560 u16 ethertype;
1561 u8 *payload;
1562 struct sk_buff *skb = rx->skb, *frame = NULL;
1563 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; 1557 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
1564 __le16 fc = hdr->frame_control; 1558 __le16 fc = hdr->frame_control;
1565 const struct ethhdr *eth; 1559 struct sk_buff_head frame_list;
1566 int remaining, err;
1567 u8 dst[ETH_ALEN];
1568 u8 src[ETH_ALEN];
1569 1560
1570 if (unlikely(!ieee80211_is_data(fc))) 1561 if (unlikely(!ieee80211_is_data(fc)))
1571 return RX_CONTINUE; 1562 return RX_CONTINUE;
@@ -1576,94 +1567,34 @@ ieee80211_rx_h_amsdu(struct ieee80211_rx_data *rx)
1576 if (!(rx->flags & IEEE80211_RX_AMSDU)) 1567 if (!(rx->flags & IEEE80211_RX_AMSDU))
1577 return RX_CONTINUE; 1568 return RX_CONTINUE;
1578 1569
1579 err = __ieee80211_data_to_8023(rx); 1570 if (ieee80211_has_a4(hdr->frame_control) &&
1580 if (unlikely(err)) 1571 rx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN &&
1572 !rx->sdata->u.vlan.sta)
1581 return RX_DROP_UNUSABLE; 1573 return RX_DROP_UNUSABLE;
1582 1574
1583 skb->dev = dev; 1575 if (is_multicast_ether_addr(hdr->addr1) &&
1584 1576 ((rx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN &&
1585 dev->stats.rx_packets++; 1577 rx->sdata->u.vlan.sta) ||
1586 dev->stats.rx_bytes += skb->len; 1578 (rx->sdata->vif.type == NL80211_IFTYPE_STATION &&
1587 1579 rx->sdata->u.mgd.use_4addr)))
1588 /* skip the wrapping header */
1589 eth = (struct ethhdr *) skb_pull(skb, sizeof(struct ethhdr));
1590 if (!eth)
1591 return RX_DROP_UNUSABLE; 1580 return RX_DROP_UNUSABLE;
1592 1581
1593 while (skb != frame) { 1582 skb->dev = dev;
1594 u8 padding; 1583 __skb_queue_head_init(&frame_list);
1595 __be16 len = eth->h_proto;
1596 unsigned int subframe_len = sizeof(struct ethhdr) + ntohs(len);
1597
1598 remaining = skb->len;
1599 memcpy(dst, eth->h_dest, ETH_ALEN);
1600 memcpy(src, eth->h_source, ETH_ALEN);
1601 1584
1602 padding = ((4 - subframe_len) & 0x3); 1585 ieee80211_amsdu_to_8023s(skb, &frame_list, dev->dev_addr,
1603 /* the last MSDU has no padding */ 1586 rx->sdata->vif.type,
1604 if (subframe_len > remaining) 1587 rx->local->hw.extra_tx_headroom);
1605 return RX_DROP_UNUSABLE;
1606 1588
1607 skb_pull(skb, sizeof(struct ethhdr)); 1589 while (!skb_queue_empty(&frame_list)) {
1608 /* if last subframe reuse skb */ 1590 rx->skb = __skb_dequeue(&frame_list);
1609 if (remaining <= subframe_len + padding)
1610 frame = skb;
1611 else {
1612 /*
1613 * Allocate and reserve two bytes more for payload
1614 * alignment since sizeof(struct ethhdr) is 14.
1615 */
1616 frame = dev_alloc_skb(
1617 ALIGN(local->hw.extra_tx_headroom, 4) +
1618 subframe_len + 2);
1619
1620 if (frame == NULL)
1621 return RX_DROP_UNUSABLE;
1622
1623 skb_reserve(frame,
1624 ALIGN(local->hw.extra_tx_headroom, 4) +
1625 sizeof(struct ethhdr) + 2);
1626 memcpy(skb_put(frame, ntohs(len)), skb->data,
1627 ntohs(len));
1628
1629 eth = (struct ethhdr *) skb_pull(skb, ntohs(len) +
1630 padding);
1631 if (!eth) {
1632 dev_kfree_skb(frame);
1633 return RX_DROP_UNUSABLE;
1634 }
1635 }
1636
1637 skb_reset_network_header(frame);
1638 frame->dev = dev;
1639 frame->priority = skb->priority;
1640 rx->skb = frame;
1641
1642 payload = frame->data;
1643 ethertype = (payload[6] << 8) | payload[7];
1644
1645 if (likely((compare_ether_addr(payload, rfc1042_header) == 0 &&
1646 ethertype != ETH_P_AARP && ethertype != ETH_P_IPX) ||
1647 compare_ether_addr(payload,
1648 bridge_tunnel_header) == 0)) {
1649 /* remove RFC1042 or Bridge-Tunnel
1650 * encapsulation and replace EtherType */
1651 skb_pull(frame, 6);
1652 memcpy(skb_push(frame, ETH_ALEN), src, ETH_ALEN);
1653 memcpy(skb_push(frame, ETH_ALEN), dst, ETH_ALEN);
1654 } else {
1655 memcpy(skb_push(frame, sizeof(__be16)),
1656 &len, sizeof(__be16));
1657 memcpy(skb_push(frame, ETH_ALEN), src, ETH_ALEN);
1658 memcpy(skb_push(frame, ETH_ALEN), dst, ETH_ALEN);
1659 }
1660 1591
1661 if (!ieee80211_frame_allowed(rx, fc)) { 1592 if (!ieee80211_frame_allowed(rx, fc)) {
1662 if (skb == frame) /* last frame */ 1593 dev_kfree_skb(rx->skb);
1663 return RX_DROP_UNUSABLE;
1664 dev_kfree_skb(frame);
1665 continue; 1594 continue;
1666 } 1595 }
1596 dev->stats.rx_packets++;
1597 dev->stats.rx_bytes += rx->skb->len;
1667 1598
1668 ieee80211_deliver_skb(rx); 1599 ieee80211_deliver_skb(rx);
1669 } 1600 }
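After this rewrite the A-MSDU demultiplexing no longer walks the wrapper frame by hand: ieee80211_amsdu_to_8023s(), the cfg80211 helper used above, splits the aggregate into one 802.3-framed skb per subframe, and the handler simply drains the resulting list. Condensed from the new side of the hunk (abridged; the per-subframe statistics updates are omitted):

	struct sk_buff_head frame_list;

	__skb_queue_head_init(&frame_list);
	ieee80211_amsdu_to_8023s(skb, &frame_list, dev->dev_addr,
				 rx->sdata->vif.type,
				 rx->local->hw.extra_tx_headroom);

	while (!skb_queue_empty(&frame_list)) {
		rx->skb = __skb_dequeue(&frame_list);

		if (!ieee80211_frame_allowed(rx, fc)) {
			/* subframe not destined for us; drop only this one */
			dev_kfree_skb(rx->skb);
			continue;
		}

		ieee80211_deliver_skb(rx);
	}

Dropping a disallowed subframe no longer aborts the whole aggregate, which the old code's skb == frame special case had to handle explicitly.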
@@ -1721,7 +1652,7 @@ ieee80211_rx_h_mesh_fwding(struct ieee80211_rx_data *rx)
1721 1652
1722 /* Frame has reached destination. Don't forward */ 1653 /* Frame has reached destination. Don't forward */
1723 if (!is_multicast_ether_addr(hdr->addr1) && 1654 if (!is_multicast_ether_addr(hdr->addr1) &&
1724 compare_ether_addr(sdata->dev->dev_addr, hdr->addr3) == 0) 1655 compare_ether_addr(sdata->vif.addr, hdr->addr3) == 0)
1725 return RX_CONTINUE; 1656 return RX_CONTINUE;
1726 1657
1727 mesh_hdr->ttl--; 1658 mesh_hdr->ttl--;
@@ -1738,10 +1669,10 @@ ieee80211_rx_h_mesh_fwding(struct ieee80211_rx_data *rx)
1738 1669
1739 if (!fwd_skb && net_ratelimit()) 1670 if (!fwd_skb && net_ratelimit())
1740 printk(KERN_DEBUG "%s: failed to clone mesh frame\n", 1671 printk(KERN_DEBUG "%s: failed to clone mesh frame\n",
1741 sdata->dev->name); 1672 sdata->name);
1742 1673
1743 fwd_hdr = (struct ieee80211_hdr *) fwd_skb->data; 1674 fwd_hdr = (struct ieee80211_hdr *) fwd_skb->data;
1744 memcpy(fwd_hdr->addr2, sdata->dev->dev_addr, ETH_ALEN); 1675 memcpy(fwd_hdr->addr2, sdata->vif.addr, ETH_ALEN);
1745 info = IEEE80211_SKB_CB(fwd_skb); 1676 info = IEEE80211_SKB_CB(fwd_skb);
1746 memset(info, 0, sizeof(*info)); 1677 memset(info, 0, sizeof(*info));
1747 info->flags |= IEEE80211_TX_INTFL_NEED_TXPROCESSING; 1678 info->flags |= IEEE80211_TX_INTFL_NEED_TXPROCESSING;
@@ -1872,7 +1803,7 @@ static void ieee80211_process_sa_query_req(struct ieee80211_sub_if_data *sdata,
1872 struct sk_buff *skb; 1803 struct sk_buff *skb;
1873 struct ieee80211_mgmt *resp; 1804 struct ieee80211_mgmt *resp;
1874 1805
1875 if (compare_ether_addr(mgmt->da, sdata->dev->dev_addr) != 0) { 1806 if (compare_ether_addr(mgmt->da, sdata->vif.addr) != 0) {
1876 /* Not to own unicast address */ 1807 /* Not to own unicast address */
1877 return; 1808 return;
1878 } 1809 }
@@ -1896,7 +1827,7 @@ static void ieee80211_process_sa_query_req(struct ieee80211_sub_if_data *sdata,
1896 resp = (struct ieee80211_mgmt *) skb_put(skb, 24); 1827 resp = (struct ieee80211_mgmt *) skb_put(skb, 24);
1897 memset(resp, 0, 24); 1828 memset(resp, 0, 24);
1898 memcpy(resp->da, mgmt->sa, ETH_ALEN); 1829 memcpy(resp->da, mgmt->sa, ETH_ALEN);
1899 memcpy(resp->sa, sdata->dev->dev_addr, ETH_ALEN); 1830 memcpy(resp->sa, sdata->vif.addr, ETH_ALEN);
1900 memcpy(resp->bssid, sdata->u.mgd.bssid, ETH_ALEN); 1831 memcpy(resp->bssid, sdata->u.mgd.bssid, ETH_ALEN);
1901 resp->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT | 1832 resp->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
1902 IEEE80211_STYPE_ACTION); 1833 IEEE80211_STYPE_ACTION);
@@ -2032,6 +1963,7 @@ ieee80211_rx_h_mgmt(struct ieee80211_rx_data *rx)
2032{ 1963{
2033 struct ieee80211_sub_if_data *sdata = rx->sdata; 1964 struct ieee80211_sub_if_data *sdata = rx->sdata;
2034 struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *) rx->skb->data; 1965 struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *) rx->skb->data;
1966 ieee80211_rx_result rxs;
2035 1967
2036 if (!(rx->flags & IEEE80211_RX_RA_MATCH)) 1968 if (!(rx->flags & IEEE80211_RX_RA_MATCH))
2037 return RX_DROP_MONITOR; 1969 return RX_DROP_MONITOR;
@@ -2039,6 +1971,10 @@ ieee80211_rx_h_mgmt(struct ieee80211_rx_data *rx)
2039 if (ieee80211_drop_unencrypted(rx, mgmt->frame_control)) 1971 if (ieee80211_drop_unencrypted(rx, mgmt->frame_control))
2040 return RX_DROP_MONITOR; 1972 return RX_DROP_MONITOR;
2041 1973
1974 rxs = ieee80211_work_rx_mgmt(rx->sdata, rx->skb);
1975 if (rxs != RX_CONTINUE)
1976 return rxs;
1977
2042 if (ieee80211_vif_is_mesh(&sdata->vif)) 1978 if (ieee80211_vif_is_mesh(&sdata->vif))
2043 return ieee80211_mesh_rx_mgmt(sdata, rx->skb); 1979 return ieee80211_mesh_rx_mgmt(sdata, rx->skb);
2044 1980
@@ -2143,7 +2079,7 @@ static void ieee80211_rx_cooked_monitor(struct ieee80211_rx_data *rx,
2143 skb->protocol = htons(ETH_P_802_2); 2079 skb->protocol = htons(ETH_P_802_2);
2144 2080
2145 list_for_each_entry_rcu(sdata, &local->interfaces, list) { 2081 list_for_each_entry_rcu(sdata, &local->interfaces, list) {
2146 if (!netif_running(sdata->dev)) 2082 if (!ieee80211_sdata_running(sdata))
2147 continue; 2083 continue;
2148 2084
2149 if (sdata->vif.type != NL80211_IFTYPE_MONITOR || 2085 if (sdata->vif.type != NL80211_IFTYPE_MONITOR ||
@@ -2280,7 +2216,7 @@ static int prepare_for_handlers(struct ieee80211_sub_if_data *sdata,
2280 if (!bssid && !sdata->u.mgd.use_4addr) 2216 if (!bssid && !sdata->u.mgd.use_4addr)
2281 return 0; 2217 return 0;
2282 if (!multicast && 2218 if (!multicast &&
2283 compare_ether_addr(sdata->dev->dev_addr, hdr->addr1) != 0) { 2219 compare_ether_addr(sdata->vif.addr, hdr->addr1) != 0) {
2284 if (!(sdata->dev->flags & IFF_PROMISC)) 2220 if (!(sdata->dev->flags & IFF_PROMISC))
2285 return 0; 2221 return 0;
2286 rx->flags &= ~IEEE80211_RX_RA_MATCH; 2222 rx->flags &= ~IEEE80211_RX_RA_MATCH;
@@ -2297,7 +2233,7 @@ static int prepare_for_handlers(struct ieee80211_sub_if_data *sdata,
2297 return 0; 2233 return 0;
2298 rx->flags &= ~IEEE80211_RX_RA_MATCH; 2234 rx->flags &= ~IEEE80211_RX_RA_MATCH;
2299 } else if (!multicast && 2235 } else if (!multicast &&
2300 compare_ether_addr(sdata->dev->dev_addr, 2236 compare_ether_addr(sdata->vif.addr,
2301 hdr->addr1) != 0) { 2237 hdr->addr1) != 0) {
2302 if (!(sdata->dev->flags & IFF_PROMISC)) 2238 if (!(sdata->dev->flags & IFF_PROMISC))
2303 return 0; 2239 return 0;
@@ -2314,7 +2250,7 @@ static int prepare_for_handlers(struct ieee80211_sub_if_data *sdata,
2314 break; 2250 break;
2315 case NL80211_IFTYPE_MESH_POINT: 2251 case NL80211_IFTYPE_MESH_POINT:
2316 if (!multicast && 2252 if (!multicast &&
2317 compare_ether_addr(sdata->dev->dev_addr, 2253 compare_ether_addr(sdata->vif.addr,
2318 hdr->addr1) != 0) { 2254 hdr->addr1) != 0) {
2319 if (!(sdata->dev->flags & IFF_PROMISC)) 2255 if (!(sdata->dev->flags & IFF_PROMISC))
2320 return 0; 2256 return 0;
@@ -2325,11 +2261,11 @@ static int prepare_for_handlers(struct ieee80211_sub_if_data *sdata,
2325 case NL80211_IFTYPE_AP_VLAN: 2261 case NL80211_IFTYPE_AP_VLAN:
2326 case NL80211_IFTYPE_AP: 2262 case NL80211_IFTYPE_AP:
2327 if (!bssid) { 2263 if (!bssid) {
2328 if (compare_ether_addr(sdata->dev->dev_addr, 2264 if (compare_ether_addr(sdata->vif.addr,
2329 hdr->addr1)) 2265 hdr->addr1))
2330 return 0; 2266 return 0;
2331 } else if (!ieee80211_bssid_match(bssid, 2267 } else if (!ieee80211_bssid_match(bssid,
2332 sdata->dev->dev_addr)) { 2268 sdata->vif.addr)) {
2333 if (!(rx->flags & IEEE80211_RX_IN_SCAN)) 2269 if (!(rx->flags & IEEE80211_RX_IN_SCAN))
2334 return 0; 2270 return 0;
2335 rx->flags &= ~IEEE80211_RX_RA_MATCH; 2271 rx->flags &= ~IEEE80211_RX_RA_MATCH;
@@ -2368,6 +2304,8 @@ static void __ieee80211_rx_handle_packet(struct ieee80211_hw *hw,
2368 int prepares; 2304 int prepares;
2369 struct ieee80211_sub_if_data *prev = NULL; 2305 struct ieee80211_sub_if_data *prev = NULL;
2370 struct sk_buff *skb_new; 2306 struct sk_buff *skb_new;
2307 struct sta_info *sta, *tmp;
2308 bool found_sta = false;
2371 2309
2372 hdr = (struct ieee80211_hdr *)skb->data; 2310 hdr = (struct ieee80211_hdr *)skb->data;
2373 memset(&rx, 0, sizeof(rx)); 2311 memset(&rx, 0, sizeof(rx));
@@ -2384,68 +2322,76 @@ static void __ieee80211_rx_handle_packet(struct ieee80211_hw *hw,
2384 ieee80211_parse_qos(&rx); 2322 ieee80211_parse_qos(&rx);
2385 ieee80211_verify_alignment(&rx); 2323 ieee80211_verify_alignment(&rx);
2386 2324
2387 rx.sta = sta_info_get(local, hdr->addr2); 2325 if (ieee80211_is_data(hdr->frame_control)) {
2388 if (rx.sta) 2326 for_each_sta_info(local, hdr->addr2, sta, tmp) {
2389 rx.sdata = rx.sta->sdata; 2327 rx.sta = sta;
2390 2328 found_sta = true;
2391 if (rx.sdata && ieee80211_is_data(hdr->frame_control)) { 2329 rx.sdata = sta->sdata;
2392 rx.flags |= IEEE80211_RX_RA_MATCH; 2330
2393 prepares = prepare_for_handlers(rx.sdata, &rx, hdr); 2331 rx.flags |= IEEE80211_RX_RA_MATCH;
2394 if (prepares) { 2332 prepares = prepare_for_handlers(rx.sdata, &rx, hdr);
2395 if (status->flag & RX_FLAG_MMIC_ERROR) { 2333 if (prepares) {
2396 if (rx.flags & IEEE80211_RX_RA_MATCH) 2334 if (status->flag & RX_FLAG_MMIC_ERROR) {
2397 ieee80211_rx_michael_mic_report(hdr, &rx); 2335 if (rx.flags & IEEE80211_RX_RA_MATCH)
2398 } else 2336 ieee80211_rx_michael_mic_report(hdr, &rx);
2399 prev = rx.sdata; 2337 } else
2338 prev = rx.sdata;
2339 }
2400 } 2340 }
2401 } else list_for_each_entry_rcu(sdata, &local->interfaces, list) { 2341 }
2402 if (!netif_running(sdata->dev)) 2342 if (!found_sta) {
2403 continue; 2343 list_for_each_entry_rcu(sdata, &local->interfaces, list) {
2344 if (!ieee80211_sdata_running(sdata))
2345 continue;
2404 2346
2405 if (sdata->vif.type == NL80211_IFTYPE_MONITOR || 2347 if (sdata->vif.type == NL80211_IFTYPE_MONITOR ||
2406 sdata->vif.type == NL80211_IFTYPE_AP_VLAN) 2348 sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
2407 continue; 2349 continue;
2408 2350
2409 rx.flags |= IEEE80211_RX_RA_MATCH; 2351 rx.sta = sta_info_get(sdata, hdr->addr2);
2410 prepares = prepare_for_handlers(sdata, &rx, hdr);
2411 2352
2412 if (!prepares) 2353 rx.flags |= IEEE80211_RX_RA_MATCH;
2413 continue; 2354 prepares = prepare_for_handlers(sdata, &rx, hdr);
2414 2355
2415 if (status->flag & RX_FLAG_MMIC_ERROR) { 2356 if (!prepares)
2416 rx.sdata = sdata; 2357 continue;
2417 if (rx.flags & IEEE80211_RX_RA_MATCH)
2418 ieee80211_rx_michael_mic_report(hdr, &rx);
2419 continue;
2420 }
2421 2358
2422 /* 2359 if (status->flag & RX_FLAG_MMIC_ERROR) {
2423 * frame is destined for this interface, but if it's not 2360 rx.sdata = sdata;
2424 * also for the previous one we handle that after the 2361 if (rx.flags & IEEE80211_RX_RA_MATCH)
2425 * loop to avoid copying the SKB once too much 2362 ieee80211_rx_michael_mic_report(hdr,
2426 */ 2363 &rx);
2364 continue;
2365 }
2427 2366
2428 if (!prev) { 2367 /*
2429 prev = sdata; 2368 * frame is destined for this interface, but if it's
2430 continue; 2369 * not also for the previous one we handle that after
2431 } 2370 * the loop to avoid copying the SKB once too much
2371 */
2432 2372
2433 /* 2373 if (!prev) {
2434 * frame was destined for the previous interface 2374 prev = sdata;
2435 * so invoke RX handlers for it 2375 continue;
2436 */ 2376 }
2437 2377
2438 skb_new = skb_copy(skb, GFP_ATOMIC); 2378 /*
2439 if (!skb_new) { 2379 * frame was destined for the previous interface
2440 if (net_ratelimit()) 2380 * so invoke RX handlers for it
2441 printk(KERN_DEBUG "%s: failed to copy " 2381 */
2442 "multicast frame for %s\n", 2382
2443 wiphy_name(local->hw.wiphy), 2383 skb_new = skb_copy(skb, GFP_ATOMIC);
2444 prev->dev->name); 2384 if (!skb_new) {
2445 continue; 2385 if (net_ratelimit())
2386 printk(KERN_DEBUG "%s: failed to copy "
2387 "multicast frame for %s\n",
2388 wiphy_name(local->hw.wiphy),
2389 prev->name);
2390 continue;
2391 }
2392 ieee80211_invoke_rx_handlers(prev, &rx, skb_new, rate);
2393 prev = sdata;
2446 } 2394 }
2447 ieee80211_invoke_rx_handlers(prev, &rx, skb_new, rate);
2448 prev = sdata;
2449 } 2395 }
2450 if (prev) 2396 if (prev)
2451 ieee80211_invoke_rx_handlers(prev, &rx, skb, rate); 2397 ieee80211_invoke_rx_handlers(prev, &rx, skb, rate);
diff --git a/net/mac80211/scan.c b/net/mac80211/scan.c
index f934c9620b7..9afe2f9885d 100644
--- a/net/mac80211/scan.c
+++ b/net/mac80211/scan.c
@@ -12,7 +12,6 @@
12 * published by the Free Software Foundation. 12 * published by the Free Software Foundation.
13 */ 13 */
14 14
15#include <linux/wireless.h>
16#include <linux/if_arp.h> 15#include <linux/if_arp.h>
17#include <linux/rtnetlink.h> 16#include <linux/rtnetlink.h>
18#include <net/mac80211.h> 17#include <net/mac80211.h>
@@ -29,16 +28,19 @@ struct ieee80211_bss *
29ieee80211_rx_bss_get(struct ieee80211_local *local, u8 *bssid, int freq, 28ieee80211_rx_bss_get(struct ieee80211_local *local, u8 *bssid, int freq,
30 u8 *ssid, u8 ssid_len) 29 u8 *ssid, u8 ssid_len)
31{ 30{
32 return (void *)cfg80211_get_bss(local->hw.wiphy, 31 struct cfg80211_bss *cbss;
33 ieee80211_get_channel(local->hw.wiphy, 32
34 freq), 33 cbss = cfg80211_get_bss(local->hw.wiphy,
35 bssid, ssid, ssid_len, 34 ieee80211_get_channel(local->hw.wiphy, freq),
36 0, 0); 35 bssid, ssid, ssid_len, 0, 0);
36 if (!cbss)
37 return NULL;
38 return (void *)cbss->priv;
37} 39}
38 40
39static void ieee80211_rx_bss_free(struct cfg80211_bss *cbss) 41static void ieee80211_rx_bss_free(struct cfg80211_bss *cbss)
40{ 42{
41 struct ieee80211_bss *bss = (void *)cbss; 43 struct ieee80211_bss *bss = (void *)cbss->priv;
42 44
43 kfree(bss_mesh_id(bss)); 45 kfree(bss_mesh_id(bss));
44 kfree(bss_mesh_cfg(bss)); 46 kfree(bss_mesh_cfg(bss));
@@ -47,7 +49,26 @@ static void ieee80211_rx_bss_free(struct cfg80211_bss *cbss)
47void ieee80211_rx_bss_put(struct ieee80211_local *local, 49void ieee80211_rx_bss_put(struct ieee80211_local *local,
48 struct ieee80211_bss *bss) 50 struct ieee80211_bss *bss)
49{ 51{
50 cfg80211_put_bss((struct cfg80211_bss *)bss); 52 if (!bss)
53 return;
54 cfg80211_put_bss(container_of((void *)bss, struct cfg80211_bss, priv));
55}
56
57static bool is_uapsd_supported(struct ieee802_11_elems *elems)
58{
59 u8 qos_info;
60
61 if (elems->wmm_info && elems->wmm_info_len == 7
62 && elems->wmm_info[5] == 1)
63 qos_info = elems->wmm_info[6];
64 else if (elems->wmm_param && elems->wmm_param_len == 24
65 && elems->wmm_param[5] == 1)
66 qos_info = elems->wmm_param[6];
67 else
68 /* no valid wmm information or parameter element found */
69 return false;
70
71 return qos_info & IEEE80211_WMM_IE_AP_QOSINFO_UAPSD;
51} 72}
52 73
53struct ieee80211_bss * 74struct ieee80211_bss *
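
A side note on the cbss->priv handling introduced in the hunk above: mac80211's per-BSS data now lives in the private area that cfg80211 allocates behind its own struct, so converting between the two is a container_of()/offsetof() exercise rather than a plain cast. A standalone sketch of that layout, with simplified stand-in types (public_bss/private_bss are not the real cfg80211/mac80211 structures):

    #include <stddef.h>
    #include <stdio.h>
    #include <stdlib.h>

    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    /* stand-ins for struct cfg80211_bss / struct ieee80211_bss */
    struct public_bss {
        int freq;
        char priv[];    /* private area follows the public part */
    };

    struct private_bss {
        int wmm_used;
    };

    int main(void)
    {
        struct public_bss *cbss = malloc(sizeof(*cbss) + sizeof(struct private_bss));
        struct private_bss *bss;
        struct public_bss *again;

        if (!cbss)
            return 1;

        /* what the lookup above returns to callers: the private part */
        bss = (struct private_bss *)cbss->priv;

        /* what ieee80211_rx_bss_put() does: recover the outer struct from
         * the private pointer before handing it back */
        again = container_of((void *)bss, struct public_bss, priv);

        printf("%s\n", again == cbss ? "recovered the outer struct" : "broken");
        free(cbss);
        return 0;
    }
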
@@ -59,6 +80,7 @@ ieee80211_bss_info_update(struct ieee80211_local *local,
59 struct ieee80211_channel *channel, 80 struct ieee80211_channel *channel,
60 bool beacon) 81 bool beacon)
61{ 82{
83 struct cfg80211_bss *cbss;
62 struct ieee80211_bss *bss; 84 struct ieee80211_bss *bss;
63 int clen; 85 int clen;
64 s32 signal = 0; 86 s32 signal = 0;
@@ -68,13 +90,14 @@ ieee80211_bss_info_update(struct ieee80211_local *local,
68 else if (local->hw.flags & IEEE80211_HW_SIGNAL_UNSPEC) 90 else if (local->hw.flags & IEEE80211_HW_SIGNAL_UNSPEC)
69 signal = (rx_status->signal * 100) / local->hw.max_signal; 91 signal = (rx_status->signal * 100) / local->hw.max_signal;
70 92
71 bss = (void *)cfg80211_inform_bss_frame(local->hw.wiphy, channel, 93 cbss = cfg80211_inform_bss_frame(local->hw.wiphy, channel,
72 mgmt, len, signal, GFP_ATOMIC); 94 mgmt, len, signal, GFP_ATOMIC);
73 95
74 if (!bss) 96 if (!cbss)
75 return NULL; 97 return NULL;
76 98
77 bss->cbss.free_priv = ieee80211_rx_bss_free; 99 cbss->free_priv = ieee80211_rx_bss_free;
100 bss = (void *)cbss->priv;
78 101
79 /* save the ERP value so that it is available at association time */ 102 /* save the ERP value so that it is available at association time */
80 if (elems->erp_info && elems->erp_info_len >= 1) { 103 if (elems->erp_info && elems->erp_info_len >= 1) {
@@ -111,6 +134,7 @@ ieee80211_bss_info_update(struct ieee80211_local *local,
111 } 134 }
112 135
113 bss->wmm_used = elems->wmm_param || elems->wmm_info; 136 bss->wmm_used = elems->wmm_param || elems->wmm_info;
137 bss->uapsd_supported = is_uapsd_supported(elems);
114 138
115 if (!beacon) 139 if (!beacon)
116 bss->last_probe_resp = jiffies; 140 bss->last_probe_resp = jiffies;
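
The new bss->uapsd_supported flag set just above comes from the is_uapsd_supported() helper added earlier in this file: the AP advertises U-APSD in bit 7 of the QoS Info byte of its WMM information (7-byte) or parameter (24-byte) element, with the version byte right before it. A small standalone sketch of the information-element case (hypothetical test vector; not the kernel's element parser):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define WMM_AP_QOSINFO_UAPSD 0x80   /* bit 7 of the AP's QoS Info field */

    /*
     * ie_body points at the body of a WMM information element:
     * OUI (3) | OUI type (1) | OUI subtype (1) | version (1) | QoS Info (1)
     */
    static bool wmm_info_has_uapsd(const uint8_t *ie_body, uint8_t ie_len)
    {
        if (ie_len != 7 || ie_body[5] != 1)   /* wrong size or version */
            return false;
        return ie_body[6] & WMM_AP_QOSINFO_UAPSD;
    }

    int main(void)
    {
        /* Microsoft OUI 00:50:f2, type 2 (WMM), subtype 0, version 1,
         * QoS Info with the U-APSD bit set */
        const uint8_t wmm_info[7] = { 0x00, 0x50, 0xf2, 0x02, 0x00, 0x01, 0x80 };

        printf("U-APSD %ssupported\n",
               wmm_info_has_uapsd(wmm_info, sizeof(wmm_info)) ? "" : "not ");
        return 0;
    }
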
@@ -147,7 +171,7 @@ ieee80211_scan_rx(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb)
147 presp = ieee80211_is_probe_resp(fc); 171 presp = ieee80211_is_probe_resp(fc);
148 if (presp) { 172 if (presp) {
149 /* ignore ProbeResp to foreign address */ 173 /* ignore ProbeResp to foreign address */
150 if (memcmp(mgmt->da, sdata->dev->dev_addr, ETH_ALEN)) 174 if (memcmp(mgmt->da, sdata->vif.addr, ETH_ALEN))
151 return RX_DROP_MONITOR; 175 return RX_DROP_MONITOR;
152 176
153 presp = true; 177 presp = true;
@@ -220,82 +244,9 @@ static bool ieee80211_prep_hw_scan(struct ieee80211_local *local)
220 return true; 244 return true;
221} 245}
222 246
223/*
224 * inform AP that we will go to sleep so that it will buffer the frames
225 * while we scan
226 */
227static void ieee80211_scan_ps_enable(struct ieee80211_sub_if_data *sdata)
228{
229 struct ieee80211_local *local = sdata->local;
230
231 local->scan_ps_enabled = false;
232
233 /* FIXME: what to do when local->pspolling is true? */
234
235 del_timer_sync(&local->dynamic_ps_timer);
236 cancel_work_sync(&local->dynamic_ps_enable_work);
237
238 if (local->hw.conf.flags & IEEE80211_CONF_PS) {
239 local->scan_ps_enabled = true;
240 local->hw.conf.flags &= ~IEEE80211_CONF_PS;
241 ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_PS);
242 }
243
244 if (!(local->scan_ps_enabled) ||
245 !(local->hw.flags & IEEE80211_HW_PS_NULLFUNC_STACK))
246 /*
247 * If power save was enabled, no need to send a nullfunc
248 * frame because AP knows that we are sleeping. But if the
249 * hardware is creating the nullfunc frame for power save
250 * status (ie. IEEE80211_HW_PS_NULLFUNC_STACK is not
251 * enabled) and power save was enabled, the firmware just
252 * sent a null frame with power save disabled. So we need
253 * to send a new nullfunc frame to inform the AP that we
254 * are again sleeping.
255 */
256 ieee80211_send_nullfunc(local, sdata, 1);
257}
258
259/* inform AP that we are awake again, unless power save is enabled */
260static void ieee80211_scan_ps_disable(struct ieee80211_sub_if_data *sdata)
261{
262 struct ieee80211_local *local = sdata->local;
263
264 if (!local->ps_sdata)
265 ieee80211_send_nullfunc(local, sdata, 0);
266 else if (local->scan_ps_enabled) {
267 /*
268 * In !IEEE80211_HW_PS_NULLFUNC_STACK case the hardware
269 * will send a nullfunc frame with the powersave bit set
270 * even though the AP already knows that we are sleeping.
271 * This could be avoided by sending a null frame with power
272 * save bit disabled before enabling the power save, but
273 * this doesn't gain anything.
274 *
275 * When IEEE80211_HW_PS_NULLFUNC_STACK is enabled, no need
276 * to send a nullfunc frame because AP already knows that
277 * we are sleeping, let's just enable power save mode in
278 * hardware.
279 */
280 local->hw.conf.flags |= IEEE80211_CONF_PS;
281 ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_PS);
282 } else if (local->hw.conf.dynamic_ps_timeout > 0) {
283 /*
284 * If IEEE80211_CONF_PS was not set and the dynamic_ps_timer
285 * had been running before leaving the operating channel,
286 * restart the timer now and send a nullfunc frame to inform
287 * the AP that we are awake.
288 */
289 ieee80211_send_nullfunc(local, sdata, 0);
290 mod_timer(&local->dynamic_ps_timer, jiffies +
291 msecs_to_jiffies(local->hw.conf.dynamic_ps_timeout));
292 }
293}
294
295void ieee80211_scan_completed(struct ieee80211_hw *hw, bool aborted) 247void ieee80211_scan_completed(struct ieee80211_hw *hw, bool aborted)
296{ 248{
297 struct ieee80211_local *local = hw_to_local(hw); 249 struct ieee80211_local *local = hw_to_local(hw);
298 struct ieee80211_sub_if_data *sdata;
299 bool was_hw_scan; 250 bool was_hw_scan;
300 251
301 mutex_lock(&local->scan_mtx); 252 mutex_lock(&local->scan_mtx);
@@ -344,41 +295,19 @@ void ieee80211_scan_completed(struct ieee80211_hw *hw, bool aborted)
344 295
345 drv_sw_scan_complete(local); 296 drv_sw_scan_complete(local);
346 297
347 mutex_lock(&local->iflist_mtx); 298 ieee80211_offchannel_return(local, true);
348 list_for_each_entry(sdata, &local->interfaces, list) {
349 if (!netif_running(sdata->dev))
350 continue;
351
352 /* Tell AP we're back */
353 if (sdata->vif.type == NL80211_IFTYPE_STATION) {
354 if (sdata->u.mgd.associated) {
355 ieee80211_scan_ps_disable(sdata);
356 netif_tx_wake_all_queues(sdata->dev);
357 }
358 } else
359 netif_tx_wake_all_queues(sdata->dev);
360
361 /* re-enable beaconing */
362 if (sdata->vif.type == NL80211_IFTYPE_AP ||
363 sdata->vif.type == NL80211_IFTYPE_ADHOC ||
364 sdata->vif.type == NL80211_IFTYPE_MESH_POINT)
365 ieee80211_bss_info_change_notify(
366 sdata, BSS_CHANGED_BEACON_ENABLED);
367 }
368 mutex_unlock(&local->iflist_mtx);
369 299
370 done: 300 done:
371 ieee80211_recalc_idle(local); 301 ieee80211_recalc_idle(local);
372 ieee80211_mlme_notify_scan_completed(local); 302 ieee80211_mlme_notify_scan_completed(local);
373 ieee80211_ibss_notify_scan_completed(local); 303 ieee80211_ibss_notify_scan_completed(local);
374 ieee80211_mesh_notify_scan_completed(local); 304 ieee80211_mesh_notify_scan_completed(local);
305 ieee80211_queue_work(&local->hw, &local->work_work);
375} 306}
376EXPORT_SYMBOL(ieee80211_scan_completed); 307EXPORT_SYMBOL(ieee80211_scan_completed);
377 308
378static int ieee80211_start_sw_scan(struct ieee80211_local *local) 309static int ieee80211_start_sw_scan(struct ieee80211_local *local)
379{ 310{
380 struct ieee80211_sub_if_data *sdata;
381
382 /* 311 /*
383 * Hardware/driver doesn't support hw_scan, so use software 312 * Hardware/driver doesn't support hw_scan, so use software
384 * scanning instead. First send a nullfunc frame with power save 313 * scanning instead. First send a nullfunc frame with power save
@@ -394,33 +323,15 @@ static int ieee80211_start_sw_scan(struct ieee80211_local *local)
394 */ 323 */
395 drv_sw_scan_start(local); 324 drv_sw_scan_start(local);
396 325
397 mutex_lock(&local->iflist_mtx); 326 ieee80211_offchannel_stop_beaconing(local);
398 list_for_each_entry(sdata, &local->interfaces, list) {
399 if (!netif_running(sdata->dev))
400 continue;
401
402 /* disable beaconing */
403 if (sdata->vif.type == NL80211_IFTYPE_AP ||
404 sdata->vif.type == NL80211_IFTYPE_ADHOC ||
405 sdata->vif.type == NL80211_IFTYPE_MESH_POINT)
406 ieee80211_bss_info_change_notify(
407 sdata, BSS_CHANGED_BEACON_ENABLED);
408
409 /*
410 * only handle non-STA interfaces here, STA interfaces
411 * are handled in the scan state machine
412 */
413 if (sdata->vif.type != NL80211_IFTYPE_STATION)
414 netif_tx_stop_all_queues(sdata->dev);
415 }
416 mutex_unlock(&local->iflist_mtx);
417 327
418 local->next_scan_state = SCAN_DECISION; 328 local->next_scan_state = SCAN_DECISION;
419 local->scan_channel_idx = 0; 329 local->scan_channel_idx = 0;
420 330
331 drv_flush(local, false);
332
421 ieee80211_configure_filter(local); 333 ieee80211_configure_filter(local);
422 334
423 /* TODO: start scan as soon as all nullfunc frames are ACKed */
424 ieee80211_queue_delayed_work(&local->hw, 335 ieee80211_queue_delayed_work(&local->hw,
425 &local->scan_work, 336 &local->scan_work,
426 IEEE80211_CHANNEL_TIME); 337 IEEE80211_CHANNEL_TIME);
@@ -433,7 +344,6 @@ static int __ieee80211_start_scan(struct ieee80211_sub_if_data *sdata,
433 struct cfg80211_scan_request *req) 344 struct cfg80211_scan_request *req)
434{ 345{
435 struct ieee80211_local *local = sdata->local; 346 struct ieee80211_local *local = sdata->local;
436 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
437 int rc; 347 int rc;
438 348
439 if (local->scan_req) 349 if (local->scan_req)
@@ -463,11 +373,8 @@ static int __ieee80211_start_scan(struct ieee80211_sub_if_data *sdata,
463 local->scan_req = req; 373 local->scan_req = req;
464 local->scan_sdata = sdata; 374 local->scan_sdata = sdata;
465 375
466 if (req != local->int_scan_req && 376 if (!list_empty(&local->work_list)) {
467 sdata->vif.type == NL80211_IFTYPE_STATION && 377 /* wait for the work to finish/time out */
468 !list_empty(&ifmgd->work_list)) {
469 /* actually wait for the work it's doing to finish/time out */
470 set_bit(IEEE80211_STA_REQ_SCAN, &ifmgd->request);
471 return 0; 378 return 0;
472 } 379 }
473 380
@@ -526,7 +433,7 @@ static int ieee80211_scan_state_decision(struct ieee80211_local *local,
526 /* check if at least one STA interface is associated */ 433 /* check if at least one STA interface is associated */
527 mutex_lock(&local->iflist_mtx); 434 mutex_lock(&local->iflist_mtx);
528 list_for_each_entry(sdata, &local->interfaces, list) { 435 list_for_each_entry(sdata, &local->interfaces, list) {
529 if (!netif_running(sdata->dev)) 436 if (!ieee80211_sdata_running(sdata))
530 continue; 437 continue;
531 438
532 if (sdata->vif.type == NL80211_IFTYPE_STATION) { 439 if (sdata->vif.type == NL80211_IFTYPE_STATION) {
@@ -564,56 +471,35 @@ static int ieee80211_scan_state_decision(struct ieee80211_local *local,
564static void ieee80211_scan_state_leave_oper_channel(struct ieee80211_local *local, 471static void ieee80211_scan_state_leave_oper_channel(struct ieee80211_local *local,
565 unsigned long *next_delay) 472 unsigned long *next_delay)
566{ 473{
567 struct ieee80211_sub_if_data *sdata; 474 ieee80211_offchannel_stop_station(local);
475
476 __set_bit(SCAN_OFF_CHANNEL, &local->scanning);
568 477
569 /* 478 /*
570 * notify the AP about us leaving the channel and stop all STA interfaces 479 * What if the nullfunc frames didn't arrive?
571 */ 480 */
572 mutex_lock(&local->iflist_mtx); 481 drv_flush(local, false);
573 list_for_each_entry(sdata, &local->interfaces, list) { 482 if (local->ops->flush)
574 if (!netif_running(sdata->dev)) 483 *next_delay = 0;
575 continue; 484 else
576 485 *next_delay = HZ / 10;
577 if (sdata->vif.type == NL80211_IFTYPE_STATION) {
578 netif_tx_stop_all_queues(sdata->dev);
579 if (sdata->u.mgd.associated)
580 ieee80211_scan_ps_enable(sdata);
581 }
582 }
583 mutex_unlock(&local->iflist_mtx);
584
585 __set_bit(SCAN_OFF_CHANNEL, &local->scanning);
586 486
587 /* advance to the next channel to be scanned */ 487 /* advance to the next channel to be scanned */
588 *next_delay = HZ / 10;
589 local->next_scan_state = SCAN_SET_CHANNEL; 488 local->next_scan_state = SCAN_SET_CHANNEL;
590} 489}
591 490
592static void ieee80211_scan_state_enter_oper_channel(struct ieee80211_local *local, 491static void ieee80211_scan_state_enter_oper_channel(struct ieee80211_local *local,
593 unsigned long *next_delay) 492 unsigned long *next_delay)
594{ 493{
595 struct ieee80211_sub_if_data *sdata = local->scan_sdata;
596
597 /* switch back to the operating channel */ 494 /* switch back to the operating channel */
598 local->scan_channel = NULL; 495 local->scan_channel = NULL;
599 ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_CHANNEL); 496 ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_CHANNEL);
600 497
601 /* 498 /*
602 * notify the AP about us being back and restart all STA interfaces 499 * Only re-enable station mode interface now; beaconing will be
500 * re-enabled once the full scan has been completed.
603 */ 501 */
604 mutex_lock(&local->iflist_mtx); 502 ieee80211_offchannel_return(local, false);
605 list_for_each_entry(sdata, &local->interfaces, list) {
606 if (!netif_running(sdata->dev))
607 continue;
608
609 /* Tell AP we're back */
610 if (sdata->vif.type == NL80211_IFTYPE_STATION) {
611 if (sdata->u.mgd.associated)
612 ieee80211_scan_ps_disable(sdata);
613 netif_tx_wake_all_queues(sdata->dev);
614 }
615 }
616 mutex_unlock(&local->iflist_mtx);
617 503
618 __clear_bit(SCAN_OFF_CHANNEL, &local->scanning); 504 __clear_bit(SCAN_OFF_CHANNEL, &local->scanning);
619 505
@@ -727,7 +613,7 @@ void ieee80211_scan_work(struct work_struct *work)
727 /* 613 /*
728 * Avoid re-scheduling when the sdata is going away. 614 * Avoid re-scheduling when the sdata is going away.
729 */ 615 */
730 if (!netif_running(sdata->dev)) { 616 if (!ieee80211_sdata_running(sdata)) {
731 ieee80211_scan_completed(&local->hw, true); 617 ieee80211_scan_completed(&local->hw, true);
732 return; 618 return;
733 } 619 }
diff --git a/net/mac80211/spectmgmt.c b/net/mac80211/spectmgmt.c
index aa743a895cf..7733f66ee2c 100644
--- a/net/mac80211/spectmgmt.c
+++ b/net/mac80211/spectmgmt.c
@@ -35,7 +35,7 @@ static void ieee80211_send_refuse_measurement_request(struct ieee80211_sub_if_da
35 35
36 if (!skb) { 36 if (!skb) {
37 printk(KERN_ERR "%s: failed to allocate buffer for " 37 printk(KERN_ERR "%s: failed to allocate buffer for "
38 "measurement report frame\n", sdata->dev->name); 38 "measurement report frame\n", sdata->name);
39 return; 39 return;
40 } 40 }
41 41
@@ -43,7 +43,7 @@ static void ieee80211_send_refuse_measurement_request(struct ieee80211_sub_if_da
43 msr_report = (struct ieee80211_mgmt *)skb_put(skb, 24); 43 msr_report = (struct ieee80211_mgmt *)skb_put(skb, 24);
44 memset(msr_report, 0, 24); 44 memset(msr_report, 0, 24);
45 memcpy(msr_report->da, da, ETH_ALEN); 45 memcpy(msr_report->da, da, ETH_ALEN);
46 memcpy(msr_report->sa, sdata->dev->dev_addr, ETH_ALEN); 46 memcpy(msr_report->sa, sdata->vif.addr, ETH_ALEN);
47 memcpy(msr_report->bssid, bssid, ETH_ALEN); 47 memcpy(msr_report->bssid, bssid, ETH_ALEN);
48 msr_report->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT | 48 msr_report->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
49 IEEE80211_STYPE_ACTION); 49 IEEE80211_STYPE_ACTION);
diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c
index 71f370dd24b..f735826f055 100644
--- a/net/mac80211/sta_info.c
+++ b/net/mac80211/sta_info.c
@@ -103,13 +103,37 @@ static int sta_info_hash_del(struct ieee80211_local *local,
103} 103}
104 104
105/* protected by RCU */ 105/* protected by RCU */
106struct sta_info *sta_info_get(struct ieee80211_local *local, const u8 *addr) 106struct sta_info *sta_info_get(struct ieee80211_sub_if_data *sdata,
107 const u8 *addr)
107{ 108{
109 struct ieee80211_local *local = sdata->local;
108 struct sta_info *sta; 110 struct sta_info *sta;
109 111
110 sta = rcu_dereference(local->sta_hash[STA_HASH(addr)]); 112 sta = rcu_dereference(local->sta_hash[STA_HASH(addr)]);
111 while (sta) { 113 while (sta) {
112 if (memcmp(sta->sta.addr, addr, ETH_ALEN) == 0) 114 if (sta->sdata == sdata &&
115 memcmp(sta->sta.addr, addr, ETH_ALEN) == 0)
116 break;
117 sta = rcu_dereference(sta->hnext);
118 }
119 return sta;
120}
121
122/*
123 * Get sta info either from the specified interface
124 * or from one of its vlans
125 */
126struct sta_info *sta_info_get_bss(struct ieee80211_sub_if_data *sdata,
127 const u8 *addr)
128{
129 struct ieee80211_local *local = sdata->local;
130 struct sta_info *sta;
131
132 sta = rcu_dereference(local->sta_hash[STA_HASH(addr)]);
133 while (sta) {
134 if ((sta->sdata == sdata ||
135 sta->sdata->bss == sdata->bss) &&
136 memcmp(sta->sta.addr, addr, ETH_ALEN) == 0)
113 break; 137 break;
114 sta = rcu_dereference(sta->hnext); 138 sta = rcu_dereference(sta->hnext);
115 } 139 }
@@ -356,6 +380,7 @@ int sta_info_insert(struct sta_info *sta)
356{ 380{
357 struct ieee80211_local *local = sta->local; 381 struct ieee80211_local *local = sta->local;
358 struct ieee80211_sub_if_data *sdata = sta->sdata; 382 struct ieee80211_sub_if_data *sdata = sta->sdata;
383 struct station_info sinfo;
359 unsigned long flags; 384 unsigned long flags;
360 int err = 0; 385 int err = 0;
361 386
@@ -364,12 +389,12 @@ int sta_info_insert(struct sta_info *sta)
364 * something inserts a STA (on one CPU) without holding the RTNL 389 * something inserts a STA (on one CPU) without holding the RTNL
365 * and another CPU turns off the net device. 390 * and another CPU turns off the net device.
366 */ 391 */
367 if (unlikely(!netif_running(sdata->dev))) { 392 if (unlikely(!ieee80211_sdata_running(sdata))) {
368 err = -ENETDOWN; 393 err = -ENETDOWN;
369 goto out_free; 394 goto out_free;
370 } 395 }
371 396
372 if (WARN_ON(compare_ether_addr(sta->sta.addr, sdata->dev->dev_addr) == 0 || 397 if (WARN_ON(compare_ether_addr(sta->sta.addr, sdata->vif.addr) == 0 ||
373 is_multicast_ether_addr(sta->sta.addr))) { 398 is_multicast_ether_addr(sta->sta.addr))) {
374 err = -EINVAL; 399 err = -EINVAL;
375 goto out_free; 400 goto out_free;
@@ -377,7 +402,7 @@ int sta_info_insert(struct sta_info *sta)
377 402
378 spin_lock_irqsave(&local->sta_lock, flags); 403 spin_lock_irqsave(&local->sta_lock, flags);
379 /* check if STA exists already */ 404 /* check if STA exists already */
380 if (sta_info_get(local, sta->sta.addr)) { 405 if (sta_info_get(sdata, sta->sta.addr)) {
381 spin_unlock_irqrestore(&local->sta_lock, flags); 406 spin_unlock_irqrestore(&local->sta_lock, flags);
382 err = -EEXIST; 407 err = -EEXIST;
383 goto out_free; 408 goto out_free;
@@ -394,7 +419,7 @@ int sta_info_insert(struct sta_info *sta)
394 struct ieee80211_sub_if_data, 419 struct ieee80211_sub_if_data,
395 u.ap); 420 u.ap);
396 421
397 drv_sta_notify(local, &sdata->vif, STA_NOTIFY_ADD, &sta->sta); 422 drv_sta_notify(local, sdata, STA_NOTIFY_ADD, &sta->sta);
398 sdata = sta->sdata; 423 sdata = sta->sdata;
399 } 424 }
400 425
@@ -405,6 +430,10 @@ int sta_info_insert(struct sta_info *sta)
405 430
406 spin_unlock_irqrestore(&local->sta_lock, flags); 431 spin_unlock_irqrestore(&local->sta_lock, flags);
407 432
433 sinfo.filled = 0;
434 sinfo.generation = local->sta_generation;
435 cfg80211_new_sta(sdata->dev, sta->sta.addr, &sinfo, GFP_ATOMIC);
436
408#ifdef CONFIG_MAC80211_DEBUGFS 437#ifdef CONFIG_MAC80211_DEBUGFS
409 /* 438 /*
410 * Debugfs entry adding might sleep, so schedule process 439 * Debugfs entry adding might sleep, so schedule process
@@ -534,7 +563,7 @@ static void __sta_info_unlink(struct sta_info **sta)
534 struct ieee80211_sub_if_data, 563 struct ieee80211_sub_if_data,
535 u.ap); 564 u.ap);
536 565
537 drv_sta_notify(local, &sdata->vif, STA_NOTIFY_REMOVE, 566 drv_sta_notify(local, sdata, STA_NOTIFY_REMOVE,
538 &(*sta)->sta); 567 &(*sta)->sta);
539 sdata = (*sta)->sdata; 568 sdata = (*sta)->sdata;
540 } 569 }
@@ -828,7 +857,7 @@ void ieee80211_sta_expire(struct ieee80211_sub_if_data *sdata,
828 if (time_after(jiffies, sta->last_rx + exp_time)) { 857 if (time_after(jiffies, sta->last_rx + exp_time)) {
829#ifdef CONFIG_MAC80211_IBSS_DEBUG 858#ifdef CONFIG_MAC80211_IBSS_DEBUG
830 printk(KERN_DEBUG "%s: expiring inactive STA %pM\n", 859 printk(KERN_DEBUG "%s: expiring inactive STA %pM\n",
831 sdata->dev->name, sta->sta.addr); 860 sdata->name, sta->sta.addr);
832#endif 861#endif
833 __sta_info_unlink(&sta); 862 __sta_info_unlink(&sta);
834 if (sta) 863 if (sta)
@@ -843,11 +872,12 @@ void ieee80211_sta_expire(struct ieee80211_sub_if_data *sdata,
843struct ieee80211_sta *ieee80211_find_sta_by_hw(struct ieee80211_hw *hw, 872struct ieee80211_sta *ieee80211_find_sta_by_hw(struct ieee80211_hw *hw,
844 const u8 *addr) 873 const u8 *addr)
845{ 874{
846 struct sta_info *sta = sta_info_get(hw_to_local(hw), addr); 875 struct sta_info *sta, *nxt;
847 876
848 if (!sta) 877 /* Just return a random station ... first in list ... */
849 return NULL; 878 for_each_sta_info(hw_to_local(hw), addr, sta, nxt)
850 return &sta->sta; 879 return &sta->sta;
880 return NULL;
851} 881}
852EXPORT_SYMBOL_GPL(ieee80211_find_sta_by_hw); 882EXPORT_SYMBOL_GPL(ieee80211_find_sta_by_hw);
853 883
@@ -872,7 +902,7 @@ void ieee80211_sta_ps_deliver_wakeup(struct sta_info *sta)
872 struct ieee80211_local *local = sdata->local; 902 struct ieee80211_local *local = sdata->local;
873 int sent, buffered; 903 int sent, buffered;
874 904
875 drv_sta_notify(local, &sdata->vif, STA_NOTIFY_AWAKE, &sta->sta); 905 drv_sta_notify(local, sdata, STA_NOTIFY_AWAKE, &sta->sta);
876 906
877 if (!skb_queue_empty(&sta->ps_tx_buf)) 907 if (!skb_queue_empty(&sta->ps_tx_buf))
878 sta_info_clear_tim_bit(sta); 908 sta_info_clear_tim_bit(sta);
@@ -885,7 +915,7 @@ void ieee80211_sta_ps_deliver_wakeup(struct sta_info *sta)
885 915
886#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG 916#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG
887 printk(KERN_DEBUG "%s: STA %pM aid %d sending %d filtered/%d PS frames " 917 printk(KERN_DEBUG "%s: STA %pM aid %d sending %d filtered/%d PS frames "
888 "since STA not sleeping anymore\n", sdata->dev->name, 918 "since STA not sleeping anymore\n", sdata->name,
889 sta->sta.addr, sta->sta.aid, sent - buffered, buffered); 919 sta->sta.addr, sta->sta.aid, sent - buffered, buffered);
890#endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */ 920#endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */
891} 921}
@@ -944,7 +974,7 @@ void ieee80211_sta_ps_deliver_poll_response(struct sta_info *sta)
944 */ 974 */
945 printk(KERN_DEBUG "%s: STA %pM sent PS Poll even " 975 printk(KERN_DEBUG "%s: STA %pM sent PS Poll even "
946 "though there are no buffered frames for it\n", 976 "though there are no buffered frames for it\n",
947 sdata->dev->name, sta->sta.addr); 977 sdata->name, sta->sta.addr);
948#endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */ 978#endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */
949 } 979 }
950} 980}
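
On the two lookups added at the top of this file's diff: sta_info_get() now requires an exact interface (sdata) match in addition to the MAC address, while sta_info_get_bss() also accepts stations hanging off another interface that shares the same BSS, which is the AP / AP_VLAN case. A reduced sketch of the difference, with simplified types (plain C, not the kernel structures or RCU accessors):

    #include <stdbool.h>
    #include <stdio.h>
    #include <string.h>

    struct bss   { int id; };
    struct sdata { struct bss *bss; };
    struct sta   { unsigned char addr[6]; struct sdata *sdata; struct sta *hnext; };

    static struct sta *lookup(struct sta *bucket, const struct sdata *sdata,
                              const unsigned char *addr, bool match_bss)
    {
        struct sta *sta;

        for (sta = bucket; sta; sta = sta->hnext) {
            bool iface_ok = sta->sdata == sdata ||
                            (match_bss && sta->sdata->bss == sdata->bss);
            if (iface_ok && memcmp(sta->addr, addr, 6) == 0)
                return sta;
        }
        return NULL;
    }

    int main(void)
    {
        struct bss shared = { 1 };
        struct sdata ap = { &shared }, vlan = { &shared };
        struct sta s = { { 1, 2, 3, 4, 5, 6 }, &vlan, NULL };
        unsigned char addr[6] = { 1, 2, 3, 4, 5, 6 };

        /* station belongs to the VLAN interface, looked up via the AP */
        printf("exact:   %s\n", lookup(&s, &ap, addr, false) ? "found" : "miss");
        printf("per-bss: %s\n", lookup(&s, &ap, addr, true)  ? "found" : "miss");
        return 0;
    }
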
diff --git a/net/mac80211/sta_info.h b/net/mac80211/sta_info.h
index b4810f6aa94..6f79bba5706 100644
--- a/net/mac80211/sta_info.h
+++ b/net/mac80211/sta_info.h
@@ -403,9 +403,37 @@ static inline u32 get_sta_flags(struct sta_info *sta)
403#define STA_INFO_CLEANUP_INTERVAL (10 * HZ) 403#define STA_INFO_CLEANUP_INTERVAL (10 * HZ)
404 404
405/* 405/*
406 * Get a STA info, must have be under RCU read lock. 406 * Get a STA info, must be under RCU read lock.
407 */ 407 */
408struct sta_info *sta_info_get(struct ieee80211_local *local, const u8 *addr); 408struct sta_info *sta_info_get(struct ieee80211_sub_if_data *sdata,
409 const u8 *addr);
410
411struct sta_info *sta_info_get_bss(struct ieee80211_sub_if_data *sdata,
412 const u8 *addr);
413
414static inline
415void for_each_sta_info_type_check(struct ieee80211_local *local,
416 const u8 *addr,
417 struct sta_info *sta,
418 struct sta_info *nxt)
419{
420}
421
422#define for_each_sta_info(local, _addr, sta, nxt) \
423 for ( /* initialise loop */ \
424 sta = rcu_dereference(local->sta_hash[STA_HASH(_addr)]),\
425 nxt = sta ? rcu_dereference(sta->hnext) : NULL; \
426 /* typecheck */ \
427 for_each_sta_info_type_check(local, (_addr), sta, nxt), \
428 /* continue condition */ \
429 sta; \
430 /* advance loop */ \
431 sta = nxt, \
432 nxt = sta ? rcu_dereference(sta->hnext) : NULL \
433 ) \
434 /* compare address and run code only if it matches */ \
435 if (memcmp(sta->sta.addr, (_addr), ETH_ALEN) == 0)
436
409/* 437/*
410 * Get STA info by index, BROKEN! 438 * Get STA info by index, BROKEN!
411 */ 439 */
diff --git a/net/mac80211/status.c b/net/mac80211/status.c
index d78f36c64c7..0ebcdda2420 100644
--- a/net/mac80211/status.c
+++ b/net/mac80211/status.c
@@ -134,6 +134,40 @@ static void ieee80211_handle_filtered_frame(struct ieee80211_local *local,
134 dev_kfree_skb(skb); 134 dev_kfree_skb(skb);
135} 135}
136 136
137static void ieee80211_frame_acked(struct sta_info *sta, struct sk_buff *skb)
138{
139 struct ieee80211_mgmt *mgmt = (void *) skb->data;
140 struct ieee80211_local *local = sta->local;
141 struct ieee80211_sub_if_data *sdata = sta->sdata;
142
143 if (ieee80211_is_action(mgmt->frame_control) &&
144 sdata->vif.type == NL80211_IFTYPE_STATION &&
145 mgmt->u.action.category == WLAN_CATEGORY_HT &&
146 mgmt->u.action.u.ht_smps.action == WLAN_HT_ACTION_SMPS) {
147 /*
148 * This update looks racy, but isn't -- if we come
149 * here we've definitely got a station that we're
150 * talking to, and on a managed interface that can
151 * only be the AP. And the only other place updating
152 * this variable is before we're associated.
153 */
154 switch (mgmt->u.action.u.ht_smps.smps_control) {
155 case WLAN_HT_SMPS_CONTROL_DYNAMIC:
156 sta->sdata->u.mgd.ap_smps = IEEE80211_SMPS_DYNAMIC;
157 break;
158 case WLAN_HT_SMPS_CONTROL_STATIC:
159 sta->sdata->u.mgd.ap_smps = IEEE80211_SMPS_STATIC;
160 break;
161 case WLAN_HT_SMPS_CONTROL_DISABLED:
162 default: /* shouldn't happen since we don't send that */
163 sta->sdata->u.mgd.ap_smps = IEEE80211_SMPS_OFF;
164 break;
165 }
166
167 ieee80211_queue_work(&local->hw, &local->recalc_smps);
168 }
169}
170
137void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb) 171void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb)
138{ 172{
139 struct sk_buff *skb2; 173 struct sk_buff *skb2;
@@ -146,7 +180,7 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb)
146 struct ieee80211_tx_status_rtap_hdr *rthdr; 180 struct ieee80211_tx_status_rtap_hdr *rthdr;
147 struct ieee80211_sub_if_data *sdata; 181 struct ieee80211_sub_if_data *sdata;
148 struct net_device *prev_dev = NULL; 182 struct net_device *prev_dev = NULL;
149 struct sta_info *sta; 183 struct sta_info *sta, *tmp;
150 int retry_count = -1, i; 184 int retry_count = -1, i;
151 bool injected; 185 bool injected;
152 186
@@ -166,9 +200,11 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb)
166 200
167 sband = local->hw.wiphy->bands[info->band]; 201 sband = local->hw.wiphy->bands[info->band];
168 202
169 sta = sta_info_get(local, hdr->addr1); 203 for_each_sta_info(local, hdr->addr1, sta, tmp) {
204 /* skip wrong virtual interface */
205 if (memcmp(hdr->addr2, sta->sdata->vif.addr, ETH_ALEN))
206 continue;
170 207
171 if (sta) {
172 if (!(info->flags & IEEE80211_TX_STAT_ACK) && 208 if (!(info->flags & IEEE80211_TX_STAT_ACK) &&
173 test_sta_flags(sta, WLAN_STA_PS_STA)) { 209 test_sta_flags(sta, WLAN_STA_PS_STA)) {
174 /* 210 /*
@@ -208,6 +244,10 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb)
208 rate_control_tx_status(local, sband, sta, skb); 244 rate_control_tx_status(local, sband, sta, skb);
209 if (ieee80211_vif_is_mesh(&sta->sdata->vif)) 245 if (ieee80211_vif_is_mesh(&sta->sdata->vif))
210 ieee80211s_update_metric(local, sta, skb); 246 ieee80211s_update_metric(local, sta, skb);
247
248 if (!(info->flags & IEEE80211_TX_CTL_INJECTED) &&
249 (info->flags & IEEE80211_TX_STAT_ACK))
250 ieee80211_frame_acked(sta, skb);
211 } 251 }
212 252
213 rcu_read_unlock(); 253 rcu_read_unlock();
@@ -311,7 +351,7 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb)
311 rcu_read_lock(); 351 rcu_read_lock();
312 list_for_each_entry_rcu(sdata, &local->interfaces, list) { 352 list_for_each_entry_rcu(sdata, &local->interfaces, list) {
313 if (sdata->vif.type == NL80211_IFTYPE_MONITOR) { 353 if (sdata->vif.type == NL80211_IFTYPE_MONITOR) {
314 if (!netif_running(sdata->dev)) 354 if (!ieee80211_sdata_running(sdata))
315 continue; 355 continue;
316 356
317 if ((sdata->u.mntr_flags & MONITOR_FLAG_COOK_FRAMES) && 357 if ((sdata->u.mntr_flags & MONITOR_FLAG_COOK_FRAMES) &&
diff --git a/net/mac80211/tkip.c b/net/mac80211/tkip.c
index 4921d724b6c..b73454a507f 100644
--- a/net/mac80211/tkip.c
+++ b/net/mac80211/tkip.c
@@ -100,7 +100,7 @@ static void tkip_mixing_phase1(const u8 *tk, struct tkip_ctx *ctx,
100 p1k[3] += tkipS(p1k[2] ^ get_unaligned_le16(tk + 12 + j)); 100 p1k[3] += tkipS(p1k[2] ^ get_unaligned_le16(tk + 12 + j));
101 p1k[4] += tkipS(p1k[3] ^ get_unaligned_le16(tk + 0 + j)) + i; 101 p1k[4] += tkipS(p1k[3] ^ get_unaligned_le16(tk + 0 + j)) + i;
102 } 102 }
103 ctx->initialized = 1; 103 ctx->state = TKIP_STATE_PHASE1_DONE;
104} 104}
105 105
106static void tkip_mixing_phase2(const u8 *tk, struct tkip_ctx *ctx, 106static void tkip_mixing_phase2(const u8 *tk, struct tkip_ctx *ctx,
@@ -183,7 +183,7 @@ void ieee80211_get_tkip_key(struct ieee80211_key_conf *keyconf,
183 /* Update the p1k only when the iv16 in the packet wraps around, this 183 /* Update the p1k only when the iv16 in the packet wraps around, this
184 * might occur after the wrap around of iv16 in the key in case of 184 * might occur after the wrap around of iv16 in the key in case of
185 * fragmented packets. */ 185 * fragmented packets. */
186 if (iv16 == 0 || !ctx->initialized) 186 if (iv16 == 0 || ctx->state == TKIP_STATE_NOT_INIT)
187 tkip_mixing_phase1(tk, ctx, hdr->addr2, iv32); 187 tkip_mixing_phase1(tk, ctx, hdr->addr2, iv32);
188 188
189 if (type == IEEE80211_TKIP_P1_KEY) { 189 if (type == IEEE80211_TKIP_P1_KEY) {
@@ -209,7 +209,7 @@ void ieee80211_tkip_encrypt_data(struct crypto_blkcipher *tfm,
209 const u8 *tk = &key->conf.key[NL80211_TKIP_DATA_OFFSET_ENCR_KEY]; 209 const u8 *tk = &key->conf.key[NL80211_TKIP_DATA_OFFSET_ENCR_KEY];
210 210
211 /* Calculate per-packet key */ 211 /* Calculate per-packet key */
212 if (ctx->iv16 == 0 || !ctx->initialized) 212 if (ctx->iv16 == 0 || ctx->state == TKIP_STATE_NOT_INIT)
213 tkip_mixing_phase1(tk, ctx, ta, ctx->iv32); 213 tkip_mixing_phase1(tk, ctx, ta, ctx->iv32);
214 214
215 tkip_mixing_phase2(tk, ctx, ctx->iv16, rc4key); 215 tkip_mixing_phase2(tk, ctx, ctx->iv16, rc4key);
@@ -259,7 +259,7 @@ int ieee80211_tkip_decrypt_data(struct crypto_blkcipher *tfm,
259 if ((keyid >> 6) != key->conf.keyidx) 259 if ((keyid >> 6) != key->conf.keyidx)
260 return TKIP_DECRYPT_INVALID_KEYIDX; 260 return TKIP_DECRYPT_INVALID_KEYIDX;
261 261
262 if (key->u.tkip.rx[queue].initialized && 262 if (key->u.tkip.rx[queue].state != TKIP_STATE_NOT_INIT &&
263 (iv32 < key->u.tkip.rx[queue].iv32 || 263 (iv32 < key->u.tkip.rx[queue].iv32 ||
264 (iv32 == key->u.tkip.rx[queue].iv32 && 264 (iv32 == key->u.tkip.rx[queue].iv32 &&
265 iv16 <= key->u.tkip.rx[queue].iv16))) { 265 iv16 <= key->u.tkip.rx[queue].iv16))) {
@@ -275,11 +275,11 @@ int ieee80211_tkip_decrypt_data(struct crypto_blkcipher *tfm,
275 275
276 if (only_iv) { 276 if (only_iv) {
277 res = TKIP_DECRYPT_OK; 277 res = TKIP_DECRYPT_OK;
278 key->u.tkip.rx[queue].initialized = 1; 278 key->u.tkip.rx[queue].state = TKIP_STATE_PHASE1_HW_UPLOADED;
279 goto done; 279 goto done;
280 } 280 }
281 281
282 if (!key->u.tkip.rx[queue].initialized || 282 if (key->u.tkip.rx[queue].state == TKIP_STATE_NOT_INIT ||
283 key->u.tkip.rx[queue].iv32 != iv32) { 283 key->u.tkip.rx[queue].iv32 != iv32) {
284 /* IV16 wrapped around - perform TKIP phase 1 */ 284 /* IV16 wrapped around - perform TKIP phase 1 */
285 tkip_mixing_phase1(tk, &key->u.tkip.rx[queue], ta, iv32); 285 tkip_mixing_phase1(tk, &key->u.tkip.rx[queue], ta, iv32);
@@ -299,18 +299,20 @@ int ieee80211_tkip_decrypt_data(struct crypto_blkcipher *tfm,
299 printk("\n"); 299 printk("\n");
300 } 300 }
301#endif 301#endif
302 if (key->local->ops->update_tkip_key && 302 }
303 key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE) { 303 if (key->local->ops->update_tkip_key &&
304 static const u8 bcast[ETH_ALEN] = 304 key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE &&
305 {0xff, 0xff, 0xff, 0xff, 0xff, 0xff}; 305 key->u.tkip.rx[queue].state != TKIP_STATE_PHASE1_HW_UPLOADED) {
306 const u8 *sta_addr = key->sta->sta.addr; 306 static const u8 bcast[ETH_ALEN] =
307 307 {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
308 if (is_multicast_ether_addr(ra)) 308 const u8 *sta_addr = key->sta->sta.addr;
309 sta_addr = bcast; 309
310 310 if (is_multicast_ether_addr(ra))
311 drv_update_tkip_key(key->local, &key->conf, sta_addr, 311 sta_addr = bcast;
312 iv32, key->u.tkip.rx[queue].p1k); 312
313 } 313 drv_update_tkip_key(key->local, &key->conf, sta_addr,
314 iv32, key->u.tkip.rx[queue].p1k);
315 key->u.tkip.rx[queue].state = TKIP_STATE_PHASE1_HW_UPLOADED;
314 } 316 }
315 317
316 tkip_mixing_phase2(tk, &key->u.tkip.rx[queue], iv16, rc4key); 318 tkip_mixing_phase2(tk, &key->u.tkip.rx[queue], iv16, rc4key);
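
The tkip.c hunks above replace the old boolean 'initialized' with a three-state field so the phase-1 key is handed to the hardware only once per iv32 instead of on every frame. A minimal model of that state machine (names mirror the patch, the rest is illustrative userspace C, not the decrypt path):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    enum tkip_state {
        TKIP_STATE_NOT_INIT,
        TKIP_STATE_PHASE1_DONE,
        TKIP_STATE_PHASE1_HW_UPLOADED,
    };

    struct tkip_rx_ctx {
        enum tkip_state state;
        uint32_t iv32;
    };

    /* returns true when the driver should be given the new phase-1 key */
    static bool rx_phase1(struct tkip_rx_ctx *ctx, uint32_t iv32, bool hw_accel)
    {
        if (ctx->state == TKIP_STATE_NOT_INIT || ctx->iv32 != iv32) {
            ctx->iv32 = iv32;
            ctx->state = TKIP_STATE_PHASE1_DONE;    /* phase 1 recomputed */
        }

        if (hw_accel && ctx->state != TKIP_STATE_PHASE1_HW_UPLOADED) {
            ctx->state = TKIP_STATE_PHASE1_HW_UPLOADED;
            return true;                            /* upload once per iv32 */
        }
        return false;
    }

    int main(void)
    {
        struct tkip_rx_ctx ctx = { TKIP_STATE_NOT_INIT, 0 };
        int a = rx_phase1(&ctx, 1, true);   /* first frame: upload */
        int b = rx_phase1(&ctx, 1, true);   /* same iv32: no re-upload */
        int c = rx_phase1(&ctx, 2, true);   /* new iv32: upload again */

        printf("%d %d %d\n", a, b, c);      /* prints: 1 0 1 */
        return 0;
    }
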
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
index ac210b58670..daf81048c1f 100644
--- a/net/mac80211/tx.c
+++ b/net/mac80211/tx.c
@@ -180,6 +180,71 @@ static int inline is_ieee80211_device(struct ieee80211_local *local,
180} 180}
181 181
182/* tx handlers */ 182/* tx handlers */
183static ieee80211_tx_result debug_noinline
184ieee80211_tx_h_dynamic_ps(struct ieee80211_tx_data *tx)
185{
186 struct ieee80211_local *local = tx->local;
187 struct ieee80211_if_managed *ifmgd;
188
189 /* driver doesn't support power save */
190 if (!(local->hw.flags & IEEE80211_HW_SUPPORTS_PS))
191 return TX_CONTINUE;
192
193 /* hardware does dynamic power save */
194 if (local->hw.flags & IEEE80211_HW_SUPPORTS_DYNAMIC_PS)
195 return TX_CONTINUE;
196
197 /* dynamic power save disabled */
198 if (local->hw.conf.dynamic_ps_timeout <= 0)
199 return TX_CONTINUE;
200
201 /* we are scanning, don't enable power save */
202 if (local->scanning)
203 return TX_CONTINUE;
204
205 if (!local->ps_sdata)
206 return TX_CONTINUE;
207
208 /* No point if we're going to suspend */
209 if (local->quiescing)
210 return TX_CONTINUE;
211
212 /* dynamic ps is supported only in managed mode */
213 if (tx->sdata->vif.type != NL80211_IFTYPE_STATION)
214 return TX_CONTINUE;
215
216 ifmgd = &tx->sdata->u.mgd;
217
218 /*
219 * Don't wakeup from power save if u-apsd is enabled, voip ac has
220 * u-apsd enabled and the frame is in voip class. This effectively
221 * means that even if all access categories have u-apsd enabled, in
222 * practise u-apsd is only used with the voip ac. This is a
223 * workaround for the case when received voip class packets do not
224 * have correct qos tag for some reason, due the network or the
225 * peer application.
226 *
227 * Note: local->uapsd_queues access is racy here. If the value is
228 * changed via debugfs, user needs to reassociate manually to have
229 * everything in sync.
230 */
231 if ((ifmgd->flags & IEEE80211_STA_UAPSD_ENABLED)
232 && (local->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VO)
233 && skb_get_queue_mapping(tx->skb) == 0)
234 return TX_CONTINUE;
235
236 if (local->hw.conf.flags & IEEE80211_CONF_PS) {
237 ieee80211_stop_queues_by_reason(&local->hw,
238 IEEE80211_QUEUE_STOP_REASON_PS);
239 ieee80211_queue_work(&local->hw,
240 &local->dynamic_ps_disable_work);
241 }
242
243 mod_timer(&local->dynamic_ps_timer, jiffies +
244 msecs_to_jiffies(local->hw.conf.dynamic_ps_timeout));
245
246 return TX_CONTINUE;
247}
183 248
184static ieee80211_tx_result debug_noinline 249static ieee80211_tx_result debug_noinline
185ieee80211_tx_h_check_assoc(struct ieee80211_tx_data *tx) 250ieee80211_tx_h_check_assoc(struct ieee80211_tx_data *tx)
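
A toy model of the dynamic power-save idea that the ieee80211_tx_h_dynamic_ps() handler above moves into the TX path: every transmitted frame pushes the "go to sleep" deadline out by dynamic_ps_timeout, and power save is only re-entered once no frame has been sent for that long. This is plain userspace C under that assumption, not the mac80211 timer/workqueue machinery:

    #include <stdbool.h>
    #include <stdio.h>

    struct dyn_ps {
        unsigned long timeout_ms;   /* cf. hw.conf.dynamic_ps_timeout */
        unsigned long deadline;     /* when we may enter power save again */
        bool ps_enabled;
    };

    static void dyn_ps_tx(struct dyn_ps *ps, unsigned long now)
    {
        if (ps->ps_enabled) {
            ps->ps_enabled = false;          /* leave PS to send the frame */
            printf("%lums: waking up for TX\n", now);
        }
        ps->deadline = now + ps->timeout_ms; /* rearm the inactivity timer */
    }

    static void dyn_ps_timer(struct dyn_ps *ps, unsigned long now)
    {
        if (!ps->ps_enabled && now >= ps->deadline) {
            ps->ps_enabled = true;
            printf("%lums: idle for %lums, entering power save\n",
                   now, ps->timeout_ms);
        }
    }

    int main(void)
    {
        struct dyn_ps ps = { .timeout_ms = 100, .deadline = 0, .ps_enabled = true };

        dyn_ps_tx(&ps, 0);        /* wake up, deadline = 100 */
        dyn_ps_tx(&ps, 50);       /* still awake, deadline = 150 */
        dyn_ps_timer(&ps, 120);   /* too early, stays awake */
        dyn_ps_timer(&ps, 160);   /* idle long enough, sleeps again */
        return 0;
    }
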
@@ -223,7 +288,7 @@ ieee80211_tx_h_check_assoc(struct ieee80211_tx_data *tx)
223#ifdef CONFIG_MAC80211_VERBOSE_DEBUG 288#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
224 printk(KERN_DEBUG "%s: dropped data frame to not " 289 printk(KERN_DEBUG "%s: dropped data frame to not "
225 "associated station %pM\n", 290 "associated station %pM\n",
226 tx->dev->name, hdr->addr1); 291 tx->sdata->name, hdr->addr1);
227#endif /* CONFIG_MAC80211_VERBOSE_DEBUG */ 292#endif /* CONFIG_MAC80211_VERBOSE_DEBUG */
228 I802_DEBUG_INC(tx->local->tx_handlers_drop_not_assoc); 293 I802_DEBUG_INC(tx->local->tx_handlers_drop_not_assoc);
229 return TX_DROP; 294 return TX_DROP;
@@ -331,7 +396,7 @@ ieee80211_tx_h_multicast_ps_buf(struct ieee80211_tx_data *tx)
331#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG 396#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG
332 if (net_ratelimit()) 397 if (net_ratelimit())
333 printk(KERN_DEBUG "%s: BC TX buffer full - dropping the oldest frame\n", 398 printk(KERN_DEBUG "%s: BC TX buffer full - dropping the oldest frame\n",
334 tx->dev->name); 399 tx->sdata->name);
335#endif 400#endif
336 dev_kfree_skb(skb_dequeue(&tx->sdata->bss->ps_bc_buf)); 401 dev_kfree_skb(skb_dequeue(&tx->sdata->bss->ps_bc_buf));
337 } else 402 } else
@@ -391,7 +456,7 @@ ieee80211_tx_h_unicast_ps_buf(struct ieee80211_tx_data *tx)
391 if (net_ratelimit()) { 456 if (net_ratelimit()) {
392 printk(KERN_DEBUG "%s: STA %pM TX " 457 printk(KERN_DEBUG "%s: STA %pM TX "
393 "buffer full - dropping oldest frame\n", 458 "buffer full - dropping oldest frame\n",
394 tx->dev->name, sta->sta.addr); 459 tx->sdata->name, sta->sta.addr);
395 } 460 }
396#endif 461#endif
397 dev_kfree_skb(old); 462 dev_kfree_skb(old);
@@ -416,7 +481,7 @@ ieee80211_tx_h_unicast_ps_buf(struct ieee80211_tx_data *tx)
416#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG 481#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG
417 else if (unlikely(staflags & WLAN_STA_PS_STA)) { 482 else if (unlikely(staflags & WLAN_STA_PS_STA)) {
418 printk(KERN_DEBUG "%s: STA %pM in PS mode, but pspoll " 483 printk(KERN_DEBUG "%s: STA %pM in PS mode, but pspoll "
419 "set -> send frame\n", tx->dev->name, 484 "set -> send frame\n", tx->sdata->name,
420 sta->sta.addr); 485 sta->sta.addr);
421 } 486 }
422#endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */ 487#endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */
@@ -519,7 +584,12 @@ ieee80211_tx_h_rate_ctrl(struct ieee80211_tx_data *tx)
519 txrc.bss_conf = &tx->sdata->vif.bss_conf; 584 txrc.bss_conf = &tx->sdata->vif.bss_conf;
520 txrc.skb = tx->skb; 585 txrc.skb = tx->skb;
521 txrc.reported_rate.idx = -1; 586 txrc.reported_rate.idx = -1;
522 txrc.max_rate_idx = tx->sdata->max_ratectrl_rateidx; 587 txrc.rate_idx_mask = tx->sdata->rc_rateidx_mask[tx->channel->band];
588 if (txrc.rate_idx_mask == (1 << sband->n_bitrates) - 1)
589 txrc.max_rate_idx = -1;
590 else
591 txrc.max_rate_idx = fls(txrc.rate_idx_mask) - 1;
592 txrc.ap = tx->sdata->vif.type == NL80211_IFTYPE_AP;
523 593
524 /* set up RTS protection if desired */ 594 /* set up RTS protection if desired */
525 if (len > tx->local->hw.wiphy->rts_threshold) { 595 if (len > tx->local->hw.wiphy->rts_threshold) {
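
On the rate-control hunk just above: when the per-interface rate mask covers every bitrate in the band, the legacy max_rate_idx cap is disabled (-1); otherwise it is derived from the mask's top set bit. A quick standalone illustration of that arithmetic (userspace C; fls() is reimplemented here because it is a kernel helper):

    #include <stdio.h>

    /* kernel-style fls(): 1-based position of the most significant set bit,
     * 0 when no bit is set */
    static int fls_u32(unsigned int x)
    {
        int r = 0;

        while (x) {
            x >>= 1;
            r++;
        }
        return r;
    }

    static int max_rate_idx(unsigned int mask, int n_bitrates)
    {
        if (mask == (1u << n_bitrates) - 1)
            return -1;                  /* mask allows everything: no cap */
        return fls_u32(mask) - 1;       /* index of the highest allowed rate */
    }

    int main(void)
    {
        printf("%d\n", max_rate_idx(0xfff, 12));   /* all 12 rates -> -1 */
        printf("%d\n", max_rate_idx(0x00f, 12));   /* rates 0..3   ->  3 */
        return 0;
    }
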
@@ -549,7 +619,7 @@ ieee80211_tx_h_rate_ctrl(struct ieee80211_tx_data *tx)
549 "%s: Dropped data frame as no usable bitrate found while " 619 "%s: Dropped data frame as no usable bitrate found while "
550 "scanning and associated. Target station: " 620 "scanning and associated. Target station: "
551 "%pM on %d GHz band\n", 621 "%pM on %d GHz band\n",
552 tx->dev->name, hdr->addr1, 622 tx->sdata->name, hdr->addr1,
553 tx->channel->band ? 5 : 2)) 623 tx->channel->band ? 5 : 2))
554 return TX_DROP; 624 return TX_DROP;
555 625
@@ -1021,7 +1091,6 @@ ieee80211_tx_prepare(struct ieee80211_sub_if_data *sdata,
1021 1091
1022 memset(tx, 0, sizeof(*tx)); 1092 memset(tx, 0, sizeof(*tx));
1023 tx->skb = skb; 1093 tx->skb = skb;
1024 tx->dev = sdata->dev; /* use original interface */
1025 tx->local = local; 1094 tx->local = local;
1026 tx->sdata = sdata; 1095 tx->sdata = sdata;
1027 tx->channel = local->hw.conf.channel; 1096 tx->channel = local->hw.conf.channel;
@@ -1052,10 +1121,13 @@ ieee80211_tx_prepare(struct ieee80211_sub_if_data *sdata,
1052 1121
1053 hdr = (struct ieee80211_hdr *) skb->data; 1122 hdr = (struct ieee80211_hdr *) skb->data;
1054 1123
1055 if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN) 1124 if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN) {
1056 tx->sta = rcu_dereference(sdata->u.vlan.sta); 1125 tx->sta = rcu_dereference(sdata->u.vlan.sta);
1126 if (!tx->sta && sdata->dev->ieee80211_ptr->use_4addr)
1127 return TX_DROP;
1128 }
1057 if (!tx->sta) 1129 if (!tx->sta)
1058 tx->sta = sta_info_get(local, hdr->addr1); 1130 tx->sta = sta_info_get(sdata, hdr->addr1);
1059 1131
1060 if (tx->sta && ieee80211_is_data_qos(hdr->frame_control) && 1132 if (tx->sta && ieee80211_is_data_qos(hdr->frame_control) &&
1061 (local->hw.flags & IEEE80211_HW_AMPDU_AGGREGATION)) { 1133 (local->hw.flags & IEEE80211_HW_AMPDU_AGGREGATION)) {
@@ -1216,6 +1288,7 @@ static int invoke_tx_handlers(struct ieee80211_tx_data *tx)
1216 goto txh_done; \ 1288 goto txh_done; \
1217 } while (0) 1289 } while (0)
1218 1290
1291 CALL_TXH(ieee80211_tx_h_dynamic_ps);
1219 CALL_TXH(ieee80211_tx_h_check_assoc); 1292 CALL_TXH(ieee80211_tx_h_check_assoc);
1220 CALL_TXH(ieee80211_tx_h_ps_buf); 1293 CALL_TXH(ieee80211_tx_h_ps_buf);
1221 CALL_TXH(ieee80211_tx_h_select_key); 1294 CALL_TXH(ieee80211_tx_h_select_key);
@@ -1398,34 +1471,6 @@ static int ieee80211_skb_resize(struct ieee80211_local *local,
1398 return 0; 1471 return 0;
1399} 1472}
1400 1473
1401static bool need_dynamic_ps(struct ieee80211_local *local)
1402{
1403 /* driver doesn't support power save */
1404 if (!(local->hw.flags & IEEE80211_HW_SUPPORTS_PS))
1405 return false;
1406
1407 /* hardware does dynamic power save */
1408 if (local->hw.flags & IEEE80211_HW_SUPPORTS_DYNAMIC_PS)
1409 return false;
1410
1411 /* dynamic power save disabled */
1412 if (local->hw.conf.dynamic_ps_timeout <= 0)
1413 return false;
1414
1415 /* we are scanning, don't enable power save */
1416 if (local->scanning)
1417 return false;
1418
1419 if (!local->ps_sdata)
1420 return false;
1421
1422 /* No point if we're going to suspend */
1423 if (local->quiescing)
1424 return false;
1425
1426 return true;
1427}
1428
1429static void ieee80211_xmit(struct ieee80211_sub_if_data *sdata, 1474static void ieee80211_xmit(struct ieee80211_sub_if_data *sdata,
1430 struct sk_buff *skb) 1475 struct sk_buff *skb)
1431{ 1476{
@@ -1436,18 +1481,6 @@ static void ieee80211_xmit(struct ieee80211_sub_if_data *sdata,
1436 int headroom; 1481 int headroom;
1437 bool may_encrypt; 1482 bool may_encrypt;
1438 1483
1439 if (need_dynamic_ps(local)) {
1440 if (local->hw.conf.flags & IEEE80211_CONF_PS) {
1441 ieee80211_stop_queues_by_reason(&local->hw,
1442 IEEE80211_QUEUE_STOP_REASON_PS);
1443 ieee80211_queue_work(&local->hw,
1444 &local->dynamic_ps_disable_work);
1445 }
1446
1447 mod_timer(&local->dynamic_ps_timer, jiffies +
1448 msecs_to_jiffies(local->hw.conf.dynamic_ps_timeout));
1449 }
1450
1451 rcu_read_lock(); 1484 rcu_read_lock();
1452 1485
1453 if (unlikely(sdata->vif.type == NL80211_IFTYPE_MONITOR)) { 1486 if (unlikely(sdata->vif.type == NL80211_IFTYPE_MONITOR)) {
@@ -1474,11 +1507,11 @@ static void ieee80211_xmit(struct ieee80211_sub_if_data *sdata,
1474 1507
1475 list_for_each_entry_rcu(tmp_sdata, &local->interfaces, 1508 list_for_each_entry_rcu(tmp_sdata, &local->interfaces,
1476 list) { 1509 list) {
1477 if (!netif_running(tmp_sdata->dev)) 1510 if (!ieee80211_sdata_running(tmp_sdata))
1478 continue; 1511 continue;
1479 if (tmp_sdata->vif.type != NL80211_IFTYPE_AP) 1512 if (tmp_sdata->vif.type != NL80211_IFTYPE_AP)
1480 continue; 1513 continue;
1481 if (compare_ether_addr(tmp_sdata->dev->dev_addr, 1514 if (compare_ether_addr(tmp_sdata->vif.addr,
1482 hdr->addr2) == 0) { 1515 hdr->addr2) == 0) {
1483 sdata = tmp_sdata; 1516 sdata = tmp_sdata;
1484 break; 1517 break;
@@ -1642,7 +1675,7 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb,
1642 fc |= cpu_to_le16(IEEE80211_FCTL_FROMDS | IEEE80211_FCTL_TODS); 1675 fc |= cpu_to_le16(IEEE80211_FCTL_FROMDS | IEEE80211_FCTL_TODS);
1643 /* RA TA DA SA */ 1676 /* RA TA DA SA */
1644 memcpy(hdr.addr1, sta->sta.addr, ETH_ALEN); 1677 memcpy(hdr.addr1, sta->sta.addr, ETH_ALEN);
1645 memcpy(hdr.addr2, dev->dev_addr, ETH_ALEN); 1678 memcpy(hdr.addr2, sdata->vif.addr, ETH_ALEN);
1646 memcpy(hdr.addr3, skb->data, ETH_ALEN); 1679 memcpy(hdr.addr3, skb->data, ETH_ALEN);
1647 memcpy(hdr.addr4, skb->data + ETH_ALEN, ETH_ALEN); 1680 memcpy(hdr.addr4, skb->data + ETH_ALEN, ETH_ALEN);
1648 hdrlen = 30; 1681 hdrlen = 30;
@@ -1656,7 +1689,7 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb,
1656 fc |= cpu_to_le16(IEEE80211_FCTL_FROMDS); 1689 fc |= cpu_to_le16(IEEE80211_FCTL_FROMDS);
1657 /* DA BSSID SA */ 1690 /* DA BSSID SA */
1658 memcpy(hdr.addr1, skb->data, ETH_ALEN); 1691 memcpy(hdr.addr1, skb->data, ETH_ALEN);
1659 memcpy(hdr.addr2, dev->dev_addr, ETH_ALEN); 1692 memcpy(hdr.addr2, sdata->vif.addr, ETH_ALEN);
1660 memcpy(hdr.addr3, skb->data + ETH_ALEN, ETH_ALEN); 1693 memcpy(hdr.addr3, skb->data + ETH_ALEN, ETH_ALEN);
1661 hdrlen = 24; 1694 hdrlen = 24;
1662 break; 1695 break;
@@ -1664,7 +1697,7 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb,
1664 fc |= cpu_to_le16(IEEE80211_FCTL_FROMDS | IEEE80211_FCTL_TODS); 1697 fc |= cpu_to_le16(IEEE80211_FCTL_FROMDS | IEEE80211_FCTL_TODS);
1665 /* RA TA DA SA */ 1698 /* RA TA DA SA */
1666 memcpy(hdr.addr1, sdata->u.wds.remote_addr, ETH_ALEN); 1699 memcpy(hdr.addr1, sdata->u.wds.remote_addr, ETH_ALEN);
1667 memcpy(hdr.addr2, dev->dev_addr, ETH_ALEN); 1700 memcpy(hdr.addr2, sdata->vif.addr, ETH_ALEN);
1668 memcpy(hdr.addr3, skb->data, ETH_ALEN); 1701 memcpy(hdr.addr3, skb->data, ETH_ALEN);
1669 memcpy(hdr.addr4, skb->data + ETH_ALEN, ETH_ALEN); 1702 memcpy(hdr.addr4, skb->data + ETH_ALEN, ETH_ALEN);
1670 hdrlen = 30; 1703 hdrlen = 30;
@@ -1678,8 +1711,8 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb,
1678 goto fail; 1711 goto fail;
1679 } 1712 }
1680 1713
1681 if (compare_ether_addr(dev->dev_addr, 1714 if (compare_ether_addr(sdata->vif.addr,
1682 skb->data + ETH_ALEN) == 0) { 1715 skb->data + ETH_ALEN) == 0) {
1683 hdrlen = ieee80211_fill_mesh_addresses(&hdr, &fc, 1716 hdrlen = ieee80211_fill_mesh_addresses(&hdr, &fc,
1684 skb->data, skb->data + ETH_ALEN); 1717 skb->data, skb->data + ETH_ALEN);
1685 meshhdrlen = ieee80211_new_mesh_header(&mesh_hdr, 1718 meshhdrlen = ieee80211_new_mesh_header(&mesh_hdr,
@@ -1709,7 +1742,7 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb,
1709 } 1742 }
1710 } 1743 }
1711 hdrlen = ieee80211_fill_mesh_addresses(&hdr, &fc, 1744 hdrlen = ieee80211_fill_mesh_addresses(&hdr, &fc,
1712 mesh_da, dev->dev_addr); 1745 mesh_da, sdata->vif.addr);
1713 rcu_read_unlock(); 1746 rcu_read_unlock();
1714 if (is_mesh_mcast) 1747 if (is_mesh_mcast)
1715 meshhdrlen = 1748 meshhdrlen =
@@ -1734,7 +1767,7 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb,
1734 if (sdata->u.mgd.use_4addr && ethertype != ETH_P_PAE) { 1767 if (sdata->u.mgd.use_4addr && ethertype != ETH_P_PAE) {
1735 fc |= cpu_to_le16(IEEE80211_FCTL_FROMDS | IEEE80211_FCTL_TODS); 1768 fc |= cpu_to_le16(IEEE80211_FCTL_FROMDS | IEEE80211_FCTL_TODS);
1736 /* RA TA DA SA */ 1769 /* RA TA DA SA */
1737 memcpy(hdr.addr2, dev->dev_addr, ETH_ALEN); 1770 memcpy(hdr.addr2, sdata->vif.addr, ETH_ALEN);
1738 memcpy(hdr.addr3, skb->data, ETH_ALEN); 1771 memcpy(hdr.addr3, skb->data, ETH_ALEN);
1739 memcpy(hdr.addr4, skb->data + ETH_ALEN, ETH_ALEN); 1772 memcpy(hdr.addr4, skb->data + ETH_ALEN, ETH_ALEN);
1740 hdrlen = 30; 1773 hdrlen = 30;
@@ -1765,9 +1798,8 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb,
1765 */ 1798 */
1766 if (!is_multicast_ether_addr(hdr.addr1)) { 1799 if (!is_multicast_ether_addr(hdr.addr1)) {
1767 rcu_read_lock(); 1800 rcu_read_lock();
1768 sta = sta_info_get(local, hdr.addr1); 1801 sta = sta_info_get(sdata, hdr.addr1);
1769 /* XXX: in the future, use sdata to look up the sta */ 1802 if (sta)
1770 if (sta && sta->sdata == sdata)
1771 sta_flags = get_sta_flags(sta); 1803 sta_flags = get_sta_flags(sta);
1772 rcu_read_unlock(); 1804 rcu_read_unlock();
1773 } 1805 }
@@ -1786,7 +1818,7 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb,
1786 unlikely(!is_multicast_ether_addr(hdr.addr1) && 1818 unlikely(!is_multicast_ether_addr(hdr.addr1) &&
1787 !(sta_flags & WLAN_STA_AUTHORIZED) && 1819 !(sta_flags & WLAN_STA_AUTHORIZED) &&
1788 !(ethertype == ETH_P_PAE && 1820 !(ethertype == ETH_P_PAE &&
1789 compare_ether_addr(dev->dev_addr, 1821 compare_ether_addr(sdata->vif.addr,
1790 skb->data + ETH_ALEN) == 0))) { 1822 skb->data + ETH_ALEN) == 0))) {
1791#ifdef CONFIG_MAC80211_VERBOSE_DEBUG 1823#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
1792 if (net_ratelimit()) 1824 if (net_ratelimit())
@@ -1926,7 +1958,7 @@ static bool ieee80211_tx_pending_skb(struct ieee80211_local *local,
1926 ieee80211_tx(sdata, skb, true); 1958 ieee80211_tx(sdata, skb, true);
1927 } else { 1959 } else {
1928 hdr = (struct ieee80211_hdr *)skb->data; 1960 hdr = (struct ieee80211_hdr *)skb->data;
1929 sta = sta_info_get(local, hdr->addr1); 1961 sta = sta_info_get(sdata, hdr->addr1);
1930 1962
1931 ret = __ieee80211_tx(local, &skb, sta, true); 1963 ret = __ieee80211_tx(local, &skb, sta, true);
1932 if (ret != IEEE80211_TX_OK) 1964 if (ret != IEEE80211_TX_OK)
@@ -2062,6 +2094,7 @@ struct sk_buff *ieee80211_beacon_get_tim(struct ieee80211_hw *hw,
2062 struct beacon_data *beacon; 2094 struct beacon_data *beacon;
2063 struct ieee80211_supported_band *sband; 2095 struct ieee80211_supported_band *sband;
2064 enum ieee80211_band band = local->hw.conf.channel->band; 2096 enum ieee80211_band band = local->hw.conf.channel->band;
2097 struct ieee80211_tx_rate_control txrc;
2065 2098
2066 sband = local->hw.wiphy->bands[band]; 2099 sband = local->hw.wiphy->bands[band];
2067 2100
@@ -2150,8 +2183,8 @@ struct sk_buff *ieee80211_beacon_get_tim(struct ieee80211_hw *hw,
2150 mgmt->frame_control = 2183 mgmt->frame_control =
2151 cpu_to_le16(IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_BEACON); 2184 cpu_to_le16(IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_BEACON);
2152 memset(mgmt->da, 0xff, ETH_ALEN); 2185 memset(mgmt->da, 0xff, ETH_ALEN);
2153 memcpy(mgmt->sa, sdata->dev->dev_addr, ETH_ALEN); 2186 memcpy(mgmt->sa, sdata->vif.addr, ETH_ALEN);
2154 memcpy(mgmt->bssid, sdata->dev->dev_addr, ETH_ALEN); 2187 memcpy(mgmt->bssid, sdata->vif.addr, ETH_ALEN);
2155 mgmt->u.beacon.beacon_int = 2188 mgmt->u.beacon.beacon_int =
2156 cpu_to_le16(sdata->vif.bss_conf.beacon_int); 2189 cpu_to_le16(sdata->vif.bss_conf.beacon_int);
2157 mgmt->u.beacon.capab_info = 0x0; /* 0x0 for MPs */ 2190 mgmt->u.beacon.capab_info = 0x0; /* 0x0 for MPs */
@@ -2169,21 +2202,25 @@ struct sk_buff *ieee80211_beacon_get_tim(struct ieee80211_hw *hw,
2169 info = IEEE80211_SKB_CB(skb); 2202 info = IEEE80211_SKB_CB(skb);
2170 2203
2171 info->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT; 2204 info->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT;
2205 info->flags |= IEEE80211_TX_CTL_NO_ACK;
2172 info->band = band; 2206 info->band = band;
2173 /* 2207
2174 * XXX: For now, always use the lowest rate 2208 memset(&txrc, 0, sizeof(txrc));
2175 */ 2209 txrc.hw = hw;
2176 info->control.rates[0].idx = 0; 2210 txrc.sband = sband;
2177 info->control.rates[0].count = 1; 2211 txrc.bss_conf = &sdata->vif.bss_conf;
2178 info->control.rates[1].idx = -1; 2212 txrc.skb = skb;
2179 info->control.rates[2].idx = -1; 2213 txrc.reported_rate.idx = -1;
2180 info->control.rates[3].idx = -1; 2214 txrc.rate_idx_mask = sdata->rc_rateidx_mask[band];
2181 info->control.rates[4].idx = -1; 2215 if (txrc.rate_idx_mask == (1 << sband->n_bitrates) - 1)
2182 BUILD_BUG_ON(IEEE80211_TX_MAX_RATES != 5); 2216 txrc.max_rate_idx = -1;
2217 else
2218 txrc.max_rate_idx = fls(txrc.rate_idx_mask) - 1;
2219 txrc.ap = true;
2220 rate_control_get_rate(sdata, NULL, &txrc);
2183 2221
2184 info->control.vif = vif; 2222 info->control.vif = vif;
2185 2223
2186 info->flags |= IEEE80211_TX_CTL_NO_ACK;
2187 info->flags |= IEEE80211_TX_CTL_CLEAR_PS_FILT; 2224 info->flags |= IEEE80211_TX_CTL_CLEAR_PS_FILT;
2188 info->flags |= IEEE80211_TX_CTL_ASSIGN_SEQ; 2225 info->flags |= IEEE80211_TX_CTL_ASSIGN_SEQ;
2189 out: 2226 out:
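The beacon hunk above stops hard-coding the lowest rate and instead fills a struct ieee80211_tx_rate_control, deriving max_rate_idx from the per-band rate index mask: a mask that covers every bitrate leaves the choice unrestricted (-1), otherwise the highest set bit wins. A minimal userspace sketch of that fls()-based computation (plain C, not the kernel helper; fls_u32() stands in for the kernel's fls()):

#include <stdio.h>

/* highest set bit position, 1-based; 0 if no bit is set, like the kernel's fls() */
static int fls_u32(unsigned int x)
{
        int r = 0;

        while (x) {
                x >>= 1;
                r++;
        }
        return r;
}

/* mirror of the beacon code: a full mask means "no restriction" (-1) */
static int max_rate_idx(unsigned int rate_idx_mask, int n_bitrates)
{
        if (rate_idx_mask == (1u << n_bitrates) - 1)
                return -1;
        return fls_u32(rate_idx_mask) - 1;
}

int main(void)
{
        printf("%d\n", max_rate_idx(0xfff, 12)); /* all 12 rates allowed -> -1 */
        printf("%d\n", max_rate_idx(0x00f, 12)); /* only the 4 lowest -> index 3 */
        return 0;
}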
@@ -2192,6 +2229,134 @@ struct sk_buff *ieee80211_beacon_get_tim(struct ieee80211_hw *hw,
2192} 2229}
2193EXPORT_SYMBOL(ieee80211_beacon_get_tim); 2230EXPORT_SYMBOL(ieee80211_beacon_get_tim);
2194 2231
2232struct sk_buff *ieee80211_pspoll_get(struct ieee80211_hw *hw,
2233 struct ieee80211_vif *vif)
2234{
2235 struct ieee80211_sub_if_data *sdata;
2236 struct ieee80211_if_managed *ifmgd;
2237 struct ieee80211_pspoll *pspoll;
2238 struct ieee80211_local *local;
2239 struct sk_buff *skb;
2240
2241 if (WARN_ON(vif->type != NL80211_IFTYPE_STATION))
2242 return NULL;
2243
2244 sdata = vif_to_sdata(vif);
2245 ifmgd = &sdata->u.mgd;
2246 local = sdata->local;
2247
2248 skb = dev_alloc_skb(local->hw.extra_tx_headroom + sizeof(*pspoll));
2249 if (!skb) {
2250 printk(KERN_DEBUG "%s: failed to allocate buffer for "
2251 "pspoll template\n", sdata->name);
2252 return NULL;
2253 }
2254 skb_reserve(skb, local->hw.extra_tx_headroom);
2255
2256 pspoll = (struct ieee80211_pspoll *) skb_put(skb, sizeof(*pspoll));
2257 memset(pspoll, 0, sizeof(*pspoll));
2258 pspoll->frame_control = cpu_to_le16(IEEE80211_FTYPE_CTL |
2259 IEEE80211_STYPE_PSPOLL);
2260 pspoll->aid = cpu_to_le16(ifmgd->aid);
2261
2262 /* aid in PS-Poll has its two MSBs each set to 1 */
2263 pspoll->aid |= cpu_to_le16(1 << 15 | 1 << 14);
2264
2265 memcpy(pspoll->bssid, ifmgd->bssid, ETH_ALEN);
2266 memcpy(pspoll->ta, vif->addr, ETH_ALEN);
2267
2268 return skb;
2269}
2270EXPORT_SYMBOL(ieee80211_pspoll_get);
2271
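ieee80211_pspoll_get() above fills the PS-Poll template from the association state; as the comment notes, the AID field carries the association ID with its two most significant bits forced to 1. A standalone illustration of that encoding (the on-wire field is little-endian, matching the kernel's cpu_to_le16()):

#include <stdio.h>
#include <stdint.h>

/* encode an association ID for a PS-Poll frame: bits 14 and 15 set */
static uint16_t pspoll_aid(uint16_t aid)
{
        return aid | (1 << 15) | (1 << 14);
}

int main(void)
{
        uint16_t v = pspoll_aid(5);     /* AID 5 as assigned by the AP */

        /* on the wire this is stored little-endian, low byte first */
        printf("0x%04x -> bytes %02x %02x\n", v, v & 0xff, v >> 8);
        return 0;
}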
2272struct sk_buff *ieee80211_nullfunc_get(struct ieee80211_hw *hw,
2273 struct ieee80211_vif *vif)
2274{
2275 struct ieee80211_hdr_3addr *nullfunc;
2276 struct ieee80211_sub_if_data *sdata;
2277 struct ieee80211_if_managed *ifmgd;
2278 struct ieee80211_local *local;
2279 struct sk_buff *skb;
2280
2281 if (WARN_ON(vif->type != NL80211_IFTYPE_STATION))
2282 return NULL;
2283
2284 sdata = vif_to_sdata(vif);
2285 ifmgd = &sdata->u.mgd;
2286 local = sdata->local;
2287
2288 skb = dev_alloc_skb(local->hw.extra_tx_headroom + sizeof(*nullfunc));
2289 if (!skb) {
2290 printk(KERN_DEBUG "%s: failed to allocate buffer for nullfunc "
2291 "template\n", sdata->name);
2292 return NULL;
2293 }
2294 skb_reserve(skb, local->hw.extra_tx_headroom);
2295
2296 nullfunc = (struct ieee80211_hdr_3addr *) skb_put(skb,
2297 sizeof(*nullfunc));
2298 memset(nullfunc, 0, sizeof(*nullfunc));
2299 nullfunc->frame_control = cpu_to_le16(IEEE80211_FTYPE_DATA |
2300 IEEE80211_STYPE_NULLFUNC |
2301 IEEE80211_FCTL_TODS);
2302 memcpy(nullfunc->addr1, ifmgd->bssid, ETH_ALEN);
2303 memcpy(nullfunc->addr2, vif->addr, ETH_ALEN);
2304 memcpy(nullfunc->addr3, ifmgd->bssid, ETH_ALEN);
2305
2306 return skb;
2307}
2308EXPORT_SYMBOL(ieee80211_nullfunc_get);
2309
2310struct sk_buff *ieee80211_probereq_get(struct ieee80211_hw *hw,
2311 struct ieee80211_vif *vif,
2312 const u8 *ssid, size_t ssid_len,
2313 const u8 *ie, size_t ie_len)
2314{
2315 struct ieee80211_sub_if_data *sdata;
2316 struct ieee80211_local *local;
2317 struct ieee80211_hdr_3addr *hdr;
2318 struct sk_buff *skb;
2319 size_t ie_ssid_len;
2320 u8 *pos;
2321
2322 sdata = vif_to_sdata(vif);
2323 local = sdata->local;
2324 ie_ssid_len = 2 + ssid_len;
2325
2326 skb = dev_alloc_skb(local->hw.extra_tx_headroom + sizeof(*hdr) +
2327 ie_ssid_len + ie_len);
2328 if (!skb) {
2329 printk(KERN_DEBUG "%s: failed to allocate buffer for probe "
2330 "request template\n", sdata->name);
2331 return NULL;
2332 }
2333
2334 skb_reserve(skb, local->hw.extra_tx_headroom);
2335
2336 hdr = (struct ieee80211_hdr_3addr *) skb_put(skb, sizeof(*hdr));
2337 memset(hdr, 0, sizeof(*hdr));
2338 hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
2339 IEEE80211_STYPE_PROBE_REQ);
2340 memset(hdr->addr1, 0xff, ETH_ALEN);
2341 memcpy(hdr->addr2, vif->addr, ETH_ALEN);
2342 memset(hdr->addr3, 0xff, ETH_ALEN);
2343
2344 pos = skb_put(skb, ie_ssid_len);
2345 *pos++ = WLAN_EID_SSID;
2346 *pos++ = ssid_len;
2347 if (ssid)
2348 memcpy(pos, ssid, ssid_len);
2349 pos += ssid_len;
2350
2351 if (ie) {
2352 pos = skb_put(skb, ie_len);
2353 memcpy(pos, ie, ie_len);
2354 }
2355
2356 return skb;
2357}
2358EXPORT_SYMBOL(ieee80211_probereq_get);
2359
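The probe request template above appends the SSID as a standard information element: one element-ID byte (WLAN_EID_SSID is 0), one length byte, then the SSID octets, with any caller-supplied IEs copied verbatim afterwards; a zero-length SSID acts as the wildcard. A small sketch of that TLV append in plain C (buffer size and the example SSID are arbitrary):

#include <stdio.h>
#include <string.h>
#include <stdint.h>

#define EID_SSID 0      /* WLAN_EID_SSID in the kernel headers */

/* append an SSID element to the buffer, return the new write position */
static uint8_t *put_ssid_ie(uint8_t *pos, const uint8_t *ssid, size_t ssid_len)
{
        *pos++ = EID_SSID;
        *pos++ = (uint8_t)ssid_len;     /* length 0 = wildcard SSID */
        if (ssid)
                memcpy(pos, ssid, ssid_len);
        return pos + ssid_len;
}

int main(void)
{
        uint8_t frame[64];
        uint8_t *end = put_ssid_ie(frame, (const uint8_t *)"guest", 5);
        uint8_t *p;

        for (p = frame; p < end; p++)
                printf("%02x ", *p);
        printf("\n");                   /* 00 05 67 75 65 73 74 */
        return 0;
}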
2195void ieee80211_rts_get(struct ieee80211_hw *hw, struct ieee80211_vif *vif, 2360void ieee80211_rts_get(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
2196 const void *frame, size_t frame_len, 2361 const void *frame, size_t frame_len,
2197 const struct ieee80211_tx_info *frame_txctl, 2362 const struct ieee80211_tx_info *frame_txctl,
diff --git a/net/mac80211/util.c b/net/mac80211/util.c
index 3848140313f..ca170b417da 100644
--- a/net/mac80211/util.c
+++ b/net/mac80211/util.c
@@ -18,7 +18,6 @@
18#include <linux/skbuff.h> 18#include <linux/skbuff.h>
19#include <linux/etherdevice.h> 19#include <linux/etherdevice.h>
20#include <linux/if_arp.h> 20#include <linux/if_arp.h>
21#include <linux/wireless.h>
22#include <linux/bitmap.h> 21#include <linux/bitmap.h>
23#include <linux/crc32.h> 22#include <linux/crc32.h>
24#include <net/net_namespace.h> 23#include <net/net_namespace.h>
@@ -480,8 +479,8 @@ void ieee80211_iterate_active_interfaces(
480 case NL80211_IFTYPE_MESH_POINT: 479 case NL80211_IFTYPE_MESH_POINT:
481 break; 480 break;
482 } 481 }
483 if (netif_running(sdata->dev)) 482 if (ieee80211_sdata_running(sdata))
484 iterator(data, sdata->dev->dev_addr, 483 iterator(data, sdata->vif.addr,
485 &sdata->vif); 484 &sdata->vif);
486 } 485 }
487 486
@@ -514,8 +513,8 @@ void ieee80211_iterate_active_interfaces_atomic(
514 case NL80211_IFTYPE_MESH_POINT: 513 case NL80211_IFTYPE_MESH_POINT:
515 break; 514 break;
516 } 515 }
517 if (netif_running(sdata->dev)) 516 if (ieee80211_sdata_running(sdata))
518 iterator(data, sdata->dev->dev_addr, 517 iterator(data, sdata->vif.addr,
519 &sdata->vif); 518 &sdata->vif);
520 } 519 }
521 520
@@ -793,6 +792,8 @@ void ieee80211_set_wmm_default(struct ieee80211_sub_if_data *sdata)
793 break; 792 break;
794 } 793 }
795 794
795 qparam.uapsd = false;
796
796 drv_conf_tx(local, queue, &qparam); 797 drv_conf_tx(local, queue, &qparam);
797 } 798 }
798} 799}
@@ -860,7 +861,7 @@ void ieee80211_send_auth(struct ieee80211_sub_if_data *sdata,
860 sizeof(*mgmt) + 6 + extra_len); 861 sizeof(*mgmt) + 6 + extra_len);
861 if (!skb) { 862 if (!skb) {
862 printk(KERN_DEBUG "%s: failed to allocate buffer for auth " 863 printk(KERN_DEBUG "%s: failed to allocate buffer for auth "
863 "frame\n", sdata->dev->name); 864 "frame\n", sdata->name);
864 return; 865 return;
865 } 866 }
866 skb_reserve(skb, local->hw.extra_tx_headroom); 867 skb_reserve(skb, local->hw.extra_tx_headroom);
@@ -870,7 +871,7 @@ void ieee80211_send_auth(struct ieee80211_sub_if_data *sdata,
870 mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT | 871 mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
871 IEEE80211_STYPE_AUTH); 872 IEEE80211_STYPE_AUTH);
872 memcpy(mgmt->da, bssid, ETH_ALEN); 873 memcpy(mgmt->da, bssid, ETH_ALEN);
873 memcpy(mgmt->sa, sdata->dev->dev_addr, ETH_ALEN); 874 memcpy(mgmt->sa, sdata->vif.addr, ETH_ALEN);
874 memcpy(mgmt->bssid, bssid, ETH_ALEN); 875 memcpy(mgmt->bssid, bssid, ETH_ALEN);
875 mgmt->u.auth.auth_alg = cpu_to_le16(auth_alg); 876 mgmt->u.auth.auth_alg = cpu_to_le16(auth_alg);
876 mgmt->u.auth.auth_transaction = cpu_to_le16(transaction); 877 mgmt->u.auth.auth_transaction = cpu_to_le16(transaction);
@@ -893,43 +894,87 @@ int ieee80211_build_preq_ies(struct ieee80211_local *local, u8 *buffer,
893 enum ieee80211_band band) 894 enum ieee80211_band band)
894{ 895{
895 struct ieee80211_supported_band *sband; 896 struct ieee80211_supported_band *sband;
896 u8 *pos, *supp_rates_len, *esupp_rates_len = NULL; 897 u8 *pos;
897 int i; 898 size_t offset = 0, noffset;
899 int supp_rates_len, i;
898 900
899 sband = local->hw.wiphy->bands[band]; 901 sband = local->hw.wiphy->bands[band];
900 902
901 pos = buffer; 903 pos = buffer;
902 904
905 supp_rates_len = min_t(int, sband->n_bitrates, 8);
906
903 *pos++ = WLAN_EID_SUPP_RATES; 907 *pos++ = WLAN_EID_SUPP_RATES;
904 supp_rates_len = pos; 908 *pos++ = supp_rates_len;
905 *pos++ = 0; 909
906 910 for (i = 0; i < supp_rates_len; i++) {
907 for (i = 0; i < sband->n_bitrates; i++) { 911 int rate = sband->bitrates[i].bitrate;
908 struct ieee80211_rate *rate = &sband->bitrates[i]; 912 *pos++ = (u8) (rate / 5);
909 913 }
910 if (esupp_rates_len) { 914
911 *esupp_rates_len += 1; 915 /* insert "request information" if in custom IEs */
912 } else if (*supp_rates_len == 8) { 916 if (ie && ie_len) {
913 *pos++ = WLAN_EID_EXT_SUPP_RATES; 917 static const u8 before_extrates[] = {
914 esupp_rates_len = pos; 918 WLAN_EID_SSID,
915 *pos++ = 1; 919 WLAN_EID_SUPP_RATES,
916 } else 920 WLAN_EID_REQUEST,
917 *supp_rates_len += 1; 921 };
922 noffset = ieee80211_ie_split(ie, ie_len,
923 before_extrates,
924 ARRAY_SIZE(before_extrates),
925 offset);
926 memcpy(pos, ie + offset, noffset - offset);
927 pos += noffset - offset;
928 offset = noffset;
929 }
930
931 if (sband->n_bitrates > i) {
932 *pos++ = WLAN_EID_EXT_SUPP_RATES;
933 *pos++ = sband->n_bitrates - i;
918 934
919 *pos++ = rate->bitrate / 5; 935 for (; i < sband->n_bitrates; i++) {
936 int rate = sband->bitrates[i].bitrate;
937 *pos++ = (u8) (rate / 5);
938 }
939 }
940
941 /* insert custom IEs that go before HT */
942 if (ie && ie_len) {
943 static const u8 before_ht[] = {
944 WLAN_EID_SSID,
945 WLAN_EID_SUPP_RATES,
946 WLAN_EID_REQUEST,
947 WLAN_EID_EXT_SUPP_RATES,
948 WLAN_EID_DS_PARAMS,
949 WLAN_EID_SUPPORTED_REGULATORY_CLASSES,
950 };
951 noffset = ieee80211_ie_split(ie, ie_len,
952 before_ht, ARRAY_SIZE(before_ht),
953 offset);
954 memcpy(pos, ie + offset, noffset - offset);
955 pos += noffset - offset;
956 offset = noffset;
920 } 957 }
921 958
922 if (sband->ht_cap.ht_supported) { 959 if (sband->ht_cap.ht_supported) {
923 __le16 tmp = cpu_to_le16(sband->ht_cap.cap); 960 u16 cap = sband->ht_cap.cap;
961 __le16 tmp;
962
963 if (ieee80211_disable_40mhz_24ghz &&
964 sband->band == IEEE80211_BAND_2GHZ) {
965 cap &= ~IEEE80211_HT_CAP_SUP_WIDTH_20_40;
966 cap &= ~IEEE80211_HT_CAP_SGI_40;
967 }
924 968
925 *pos++ = WLAN_EID_HT_CAPABILITY; 969 *pos++ = WLAN_EID_HT_CAPABILITY;
926 *pos++ = sizeof(struct ieee80211_ht_cap); 970 *pos++ = sizeof(struct ieee80211_ht_cap);
927 memset(pos, 0, sizeof(struct ieee80211_ht_cap)); 971 memset(pos, 0, sizeof(struct ieee80211_ht_cap));
972 tmp = cpu_to_le16(cap);
928 memcpy(pos, &tmp, sizeof(u16)); 973 memcpy(pos, &tmp, sizeof(u16));
929 pos += sizeof(u16); 974 pos += sizeof(u16);
930 /* TODO: needs a define here for << 2 */
931 *pos++ = sband->ht_cap.ampdu_factor | 975 *pos++ = sband->ht_cap.ampdu_factor |
932 (sband->ht_cap.ampdu_density << 2); 976 (sband->ht_cap.ampdu_density <<
977 IEEE80211_HT_AMPDU_PARM_DENSITY_SHIFT);
933 memcpy(pos, &sband->ht_cap.mcs, sizeof(sband->ht_cap.mcs)); 978 memcpy(pos, &sband->ht_cap.mcs, sizeof(sband->ht_cap.mcs));
934 pos += sizeof(sband->ht_cap.mcs); 979 pos += sizeof(sband->ht_cap.mcs);
935 pos += 2 + 4 + 1; /* ext info, BF cap, antsel */ 980 pos += 2 + 4 + 1; /* ext info, BF cap, antsel */
@@ -940,9 +985,11 @@ int ieee80211_build_preq_ies(struct ieee80211_local *local, u8 *buffer,
940 * that calculates local->scan_ies_len. 985 * that calculates local->scan_ies_len.
941 */ 986 */
942 987
943 if (ie) { 988 /* add any remaining custom IEs */
944 memcpy(pos, ie, ie_len); 989 if (ie && ie_len) {
945 pos += ie_len; 990 noffset = ie_len;
991 memcpy(pos, ie + offset, noffset - offset);
992 pos += noffset - offset;
946 } 993 }
947 994
948 return pos - buffer; 995 return pos - buffer;
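The rewritten ieee80211_build_preq_ies() above emits at most eight rates in the Supported Rates element and moves the remainder into Extended Supported Rates, encoding each entry in 500 kbps units (bitrate / 5, with bitrates stored in 100 kbps units), and splices the caller's IEs in at the spots dictated by element ordering. A self-contained sketch of just the rate-splitting part, using an assumed example rate table:

#include <stdio.h>
#include <stdint.h>

#define EID_SUPP_RATES      1
#define EID_EXT_SUPP_RATES 50

/* rates in 100 kbps units, the way mac80211 stores bitrates (example values) */
static const int rates_100kbps[] = { 10, 20, 55, 110, 60, 90, 120, 180, 240, 360, 480, 540 };

static size_t build_rate_ies(uint8_t *buf, const int *rates, int n)
{
        uint8_t *pos = buf;
        int first = n < 8 ? n : 8;
        int i;

        *pos++ = EID_SUPP_RATES;
        *pos++ = (uint8_t)first;
        for (i = 0; i < first; i++)
                *pos++ = (uint8_t)(rates[i] / 5);       /* 500 kbps units */

        if (n > first) {
                *pos++ = EID_EXT_SUPP_RATES;
                *pos++ = (uint8_t)(n - first);
                for (; i < n; i++)
                        *pos++ = (uint8_t)(rates[i] / 5);
        }
        return pos - buf;
}

int main(void)
{
        uint8_t buf[32];
        size_t len = build_rate_ies(buf, rates_100kbps, 12);
        size_t j;

        for (j = 0; j < len; j++)
                printf("%02x ", buf[j]);
        printf("\n");   /* 8 rates in SUPP_RATES, remaining 4 in EXT_SUPP_RATES */
        return 0;
}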
@@ -955,40 +1002,33 @@ void ieee80211_send_probe_req(struct ieee80211_sub_if_data *sdata, u8 *dst,
955 struct ieee80211_local *local = sdata->local; 1002 struct ieee80211_local *local = sdata->local;
956 struct sk_buff *skb; 1003 struct sk_buff *skb;
957 struct ieee80211_mgmt *mgmt; 1004 struct ieee80211_mgmt *mgmt;
958 u8 *pos; 1005 size_t buf_len;
959 1006 u8 *buf;
960 skb = dev_alloc_skb(local->hw.extra_tx_headroom + sizeof(*mgmt) + 200 + 1007
961 ie_len); 1008 /* FIXME: come up with a proper value */
962 if (!skb) { 1009 buf = kmalloc(200 + ie_len, GFP_KERNEL);
963 printk(KERN_DEBUG "%s: failed to allocate buffer for probe " 1010 if (!buf) {
964 "request\n", sdata->dev->name); 1011 printk(KERN_DEBUG "%s: failed to allocate temporary IE "
1012 "buffer\n", sdata->name);
965 return; 1013 return;
966 } 1014 }
967 skb_reserve(skb, local->hw.extra_tx_headroom);
968 1015
969 mgmt = (struct ieee80211_mgmt *) skb_put(skb, 24); 1016 buf_len = ieee80211_build_preq_ies(local, buf, ie, ie_len,
970 memset(mgmt, 0, 24); 1017 local->hw.conf.channel->band);
971 mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT | 1018
972 IEEE80211_STYPE_PROBE_REQ); 1019 skb = ieee80211_probereq_get(&local->hw, &sdata->vif,
973 memcpy(mgmt->sa, sdata->dev->dev_addr, ETH_ALEN); 1020 ssid, ssid_len,
1021 buf, buf_len);
1022
974 if (dst) { 1023 if (dst) {
1024 mgmt = (struct ieee80211_mgmt *) skb->data;
975 memcpy(mgmt->da, dst, ETH_ALEN); 1025 memcpy(mgmt->da, dst, ETH_ALEN);
976 memcpy(mgmt->bssid, dst, ETH_ALEN); 1026 memcpy(mgmt->bssid, dst, ETH_ALEN);
977 } else {
978 memset(mgmt->da, 0xff, ETH_ALEN);
979 memset(mgmt->bssid, 0xff, ETH_ALEN);
980 } 1027 }
981 pos = skb_put(skb, 2 + ssid_len);
982 *pos++ = WLAN_EID_SSID;
983 *pos++ = ssid_len;
984 memcpy(pos, ssid, ssid_len);
985 pos += ssid_len;
986
987 skb_put(skb, ieee80211_build_preq_ies(local, pos, ie, ie_len,
988 local->hw.conf.channel->band));
989 1028
990 IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT; 1029 IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT;
991 ieee80211_tx_skb(sdata, skb); 1030 ieee80211_tx_skb(sdata, skb);
1031 kfree(buf);
992} 1032}
993 1033
994u32 ieee80211_sta_get_rates(struct ieee80211_local *local, 1034u32 ieee80211_sta_get_rates(struct ieee80211_local *local,
@@ -1032,16 +1072,15 @@ void ieee80211_stop_device(struct ieee80211_local *local)
1032 ieee80211_led_radio(local, false); 1072 ieee80211_led_radio(local, false);
1033 1073
1034 cancel_work_sync(&local->reconfig_filter); 1074 cancel_work_sync(&local->reconfig_filter);
1035 drv_stop(local);
1036 1075
1037 flush_workqueue(local->workqueue); 1076 flush_workqueue(local->workqueue);
1077 drv_stop(local);
1038} 1078}
1039 1079
1040int ieee80211_reconfig(struct ieee80211_local *local) 1080int ieee80211_reconfig(struct ieee80211_local *local)
1041{ 1081{
1042 struct ieee80211_hw *hw = &local->hw; 1082 struct ieee80211_hw *hw = &local->hw;
1043 struct ieee80211_sub_if_data *sdata; 1083 struct ieee80211_sub_if_data *sdata;
1044 struct ieee80211_if_init_conf conf;
1045 struct sta_info *sta; 1084 struct sta_info *sta;
1046 unsigned long flags; 1085 unsigned long flags;
1047 int res; 1086 int res;
@@ -1061,7 +1100,7 @@ int ieee80211_reconfig(struct ieee80211_local *local)
1061 if (res) { 1100 if (res) {
1062 WARN(local->suspended, "Harware became unavailable " 1101 WARN(local->suspended, "Harware became unavailable "
1063 "upon resume. This is could be a software issue" 1102 "upon resume. This is could be a software issue"
1064 "prior to suspend or a harware issue\n"); 1103 "prior to suspend or a hardware issue\n");
1065 return res; 1104 return res;
1066 } 1105 }
1067 1106
@@ -1072,12 +1111,8 @@ int ieee80211_reconfig(struct ieee80211_local *local)
1072 list_for_each_entry(sdata, &local->interfaces, list) { 1111 list_for_each_entry(sdata, &local->interfaces, list) {
1073 if (sdata->vif.type != NL80211_IFTYPE_AP_VLAN && 1112 if (sdata->vif.type != NL80211_IFTYPE_AP_VLAN &&
1074 sdata->vif.type != NL80211_IFTYPE_MONITOR && 1113 sdata->vif.type != NL80211_IFTYPE_MONITOR &&
1075 netif_running(sdata->dev)) { 1114 ieee80211_sdata_running(sdata))
1076 conf.vif = &sdata->vif; 1115 res = drv_add_interface(local, &sdata->vif);
1077 conf.type = sdata->vif.type;
1078 conf.mac_addr = sdata->dev->dev_addr;
1079 res = drv_add_interface(local, &conf);
1080 }
1081 } 1116 }
1082 1117
1083 /* add STAs back */ 1118 /* add STAs back */
@@ -1090,7 +1125,7 @@ int ieee80211_reconfig(struct ieee80211_local *local)
1090 struct ieee80211_sub_if_data, 1125 struct ieee80211_sub_if_data,
1091 u.ap); 1126 u.ap);
1092 1127
1093 drv_sta_notify(local, &sdata->vif, STA_NOTIFY_ADD, 1128 drv_sta_notify(local, sdata, STA_NOTIFY_ADD,
1094 &sta->sta); 1129 &sta->sta);
1095 } 1130 }
1096 spin_unlock_irqrestore(&local->sta_lock, flags); 1131 spin_unlock_irqrestore(&local->sta_lock, flags);
@@ -1119,7 +1154,7 @@ int ieee80211_reconfig(struct ieee80211_local *local)
1119 /* Finally also reconfigure all the BSS information */ 1154 /* Finally also reconfigure all the BSS information */
1120 list_for_each_entry(sdata, &local->interfaces, list) { 1155 list_for_each_entry(sdata, &local->interfaces, list) {
1121 u32 changed = ~0; 1156 u32 changed = ~0;
1122 if (!netif_running(sdata->dev)) 1157 if (!ieee80211_sdata_running(sdata))
1123 continue; 1158 continue;
1124 switch (sdata->vif.type) { 1159 switch (sdata->vif.type) {
1125 case NL80211_IFTYPE_STATION: 1160 case NL80211_IFTYPE_STATION:
@@ -1147,7 +1182,7 @@ int ieee80211_reconfig(struct ieee80211_local *local)
1147 1182
1148 /* add back keys */ 1183 /* add back keys */
1149 list_for_each_entry(sdata, &local->interfaces, list) 1184 list_for_each_entry(sdata, &local->interfaces, list)
1150 if (netif_running(sdata->dev)) 1185 if (ieee80211_sdata_running(sdata))
1151 ieee80211_enable_keys(sdata); 1186 ieee80211_enable_keys(sdata);
1152 1187
1153 ieee80211_wake_queues_by_reason(hw, 1188 ieee80211_wake_queues_by_reason(hw,
@@ -1194,3 +1229,133 @@ int ieee80211_reconfig(struct ieee80211_local *local)
1194 return 0; 1229 return 0;
1195} 1230}
1196 1231
1232static int check_mgd_smps(struct ieee80211_if_managed *ifmgd,
1233 enum ieee80211_smps_mode *smps_mode)
1234{
1235 if (ifmgd->associated) {
1236 *smps_mode = ifmgd->ap_smps;
1237
1238 if (*smps_mode == IEEE80211_SMPS_AUTOMATIC) {
1239 if (ifmgd->powersave)
1240 *smps_mode = IEEE80211_SMPS_DYNAMIC;
1241 else
1242 *smps_mode = IEEE80211_SMPS_OFF;
1243 }
1244
1245 return 1;
1246 }
1247
1248 return 0;
1249}
1250
1251/* must hold iflist_mtx */
1252void ieee80211_recalc_smps(struct ieee80211_local *local,
1253 struct ieee80211_sub_if_data *forsdata)
1254{
1255 struct ieee80211_sub_if_data *sdata;
1256 enum ieee80211_smps_mode smps_mode = IEEE80211_SMPS_OFF;
1257 int count = 0;
1258
1259 if (forsdata)
1260 WARN_ON(!mutex_is_locked(&forsdata->u.mgd.mtx));
1261
1262 WARN_ON(!mutex_is_locked(&local->iflist_mtx));
1263
1264 /*
1265 * This function could be improved to handle multiple
1266 * interfaces better, but right now it makes any
1267 * non-station interfaces force SM PS to be turned
1268 * off. If there are multiple station interfaces it
1269 * could also use the best possible mode, e.g. if
1270 * one is in static and the other in dynamic then
1271 * dynamic is ok.
1272 */
1273
1274 list_for_each_entry(sdata, &local->interfaces, list) {
1275 if (!netif_running(sdata->dev))
1276 continue;
1277 if (sdata->vif.type != NL80211_IFTYPE_STATION)
1278 goto set;
1279 if (sdata != forsdata) {
1280 /*
1281 * This nested is ok -- we are holding the iflist_mtx
1282 * so can't get here twice or so. But it's required
1283 * since normally we acquire it first and then the
1284 * iflist_mtx.
1285 */
1286 mutex_lock_nested(&sdata->u.mgd.mtx, SINGLE_DEPTH_NESTING);
1287 count += check_mgd_smps(&sdata->u.mgd, &smps_mode);
1288 mutex_unlock(&sdata->u.mgd.mtx);
1289 } else
1290 count += check_mgd_smps(&sdata->u.mgd, &smps_mode);
1291
1292 if (count > 1) {
1293 smps_mode = IEEE80211_SMPS_OFF;
1294 break;
1295 }
1296 }
1297
1298 if (smps_mode == local->smps_mode)
1299 return;
1300
1301 set:
1302 local->smps_mode = smps_mode;
1303 /* changed flag is auto-detected for this */
1304 ieee80211_hw_config(local, 0);
1305}
1306
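check_mgd_smps() above resolves the SM power save mode for an associated managed interface: the AP-negotiated mode is used as-is unless it is "automatic", which collapses to dynamic SM PS when powersave is enabled and to off otherwise; ieee80211_recalc_smps() then forces OFF whenever a non-station interface or more than one station interface is running. A tiny sketch of just the per-interface resolution (the enum values here are stand-ins, not the kernel's):

#include <stdio.h>
#include <stdbool.h>

enum smps_mode { SMPS_AUTOMATIC, SMPS_OFF, SMPS_STATIC, SMPS_DYNAMIC };

/* mirrors check_mgd_smps(): resolve "automatic" using the powersave flag */
static enum smps_mode resolve_smps(enum smps_mode ap_smps, bool powersave)
{
        if (ap_smps != SMPS_AUTOMATIC)
                return ap_smps;
        return powersave ? SMPS_DYNAMIC : SMPS_OFF;
}

int main(void)
{
        printf("%d\n", resolve_smps(SMPS_AUTOMATIC, true));   /* -> dynamic */
        printf("%d\n", resolve_smps(SMPS_AUTOMATIC, false));  /* -> off */
        printf("%d\n", resolve_smps(SMPS_STATIC, true));      /* -> static */
        return 0;
}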
1307static bool ieee80211_id_in_list(const u8 *ids, int n_ids, u8 id)
1308{
1309 int i;
1310
1311 for (i = 0; i < n_ids; i++)
1312 if (ids[i] == id)
1313 return true;
1314 return false;
1315}
1316
1317/**
1318 * ieee80211_ie_split - split an IE buffer according to ordering
1319 *
1320 * @ies: the IE buffer
1321 * @ielen: the length of the IE buffer
1322 * @ids: an array with element IDs that are allowed before
1323 * the split
1324 * @n_ids: the size of the element ID array
1325 * @offset: offset where to start splitting in the buffer
1326 *
1327 * This function splits an IE buffer by updating the @offset
1328 * variable to point to the location where the buffer should be
1329 * split.
1330 *
1331 * It assumes that the given IE buffer is well-formed, this
1332 * has to be guaranteed by the caller!
1333 *
1334 * It also assumes that the IEs in the buffer are ordered
1335 * correctly, if not the result of using this function will not
1336 * be ordered correctly either, i.e. it does no reordering.
1337 *
1338 * The function returns the offset where the next part of the
1339 * buffer starts, which may be @ielen if the entire (remainder)
1340 * of the buffer should be used.
1341 */
1342size_t ieee80211_ie_split(const u8 *ies, size_t ielen,
1343 const u8 *ids, int n_ids, size_t offset)
1344{
1345 size_t pos = offset;
1346
1347 while (pos < ielen && ieee80211_id_in_list(ids, n_ids, ies[pos]))
1348 pos += 2 + ies[pos + 1];
1349
1350 return pos;
1351}
1352
1353size_t ieee80211_ie_split_vendor(const u8 *ies, size_t ielen, size_t offset)
1354{
1355 size_t pos = offset;
1356
1357 while (pos < ielen && ies[pos] != WLAN_EID_VENDOR_SPECIFIC)
1358 pos += 2 + ies[pos + 1];
1359
1360 return pos;
1361}
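ieee80211_ie_split() above walks the buffer as ID/length/value triplets and stops at the first element whose ID is not in the allowed-before-the-split list, exactly as its kerneldoc describes. The same TLV walk as a self-contained userspace sketch (element IDs 0, 1 and 45 are the real SSID, Supported Rates and HT Capabilities IDs; the payloads are toy values):

#include <stdio.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

static bool id_in_list(const uint8_t *ids, int n_ids, uint8_t id)
{
        int i;

        for (i = 0; i < n_ids; i++)
                if (ids[i] == id)
                        return true;
        return false;
}

/* same contract as the kernel helper: assumes a well-formed, ordered buffer */
static size_t ie_split(const uint8_t *ies, size_t ielen,
                       const uint8_t *ids, int n_ids, size_t offset)
{
        size_t pos = offset;

        while (pos < ielen && id_in_list(ids, n_ids, ies[pos]))
                pos += 2 + ies[pos + 1];
        return pos;
}

int main(void)
{
        /* SSID(0), Supported Rates(1), HT Capabilities(45) with toy payloads */
        const uint8_t ies[] = { 0, 3, 'a', 'b', 'c',  1, 2, 0x02, 0x04,  45, 1, 0x00 };
        const uint8_t before_ht[] = { 0, 1 };
        size_t split = ie_split(ies, sizeof(ies), before_ht, 2, 0);

        printf("split at offset %zu (the HT element starts there)\n", split);
        return 0;
}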
diff --git a/net/mac80211/wme.c b/net/mac80211/wme.c
index 79d887dae73..34e6d02da77 100644
--- a/net/mac80211/wme.c
+++ b/net/mac80211/wme.c
@@ -96,7 +96,7 @@ u16 ieee80211_select_queue(struct ieee80211_sub_if_data *sdata,
96 } 96 }
97 97
98 if (!sta && ra && !is_multicast_ether_addr(ra)) { 98 if (!sta && ra && !is_multicast_ether_addr(ra)) {
99 sta = sta_info_get(local, ra); 99 sta = sta_info_get(sdata, ra);
100 if (sta) 100 if (sta)
101 sta_flags = get_sta_flags(sta); 101 sta_flags = get_sta_flags(sta);
102 } 102 }
diff --git a/net/mac80211/work.c b/net/mac80211/work.c
new file mode 100644
index 00000000000..81bd5d592bb
--- /dev/null
+++ b/net/mac80211/work.c
@@ -0,0 +1,1098 @@
1/*
2 * mac80211 work implementation
3 *
4 * Copyright 2003-2008, Jouni Malinen <j@w1.fi>
5 * Copyright 2004, Instant802 Networks, Inc.
6 * Copyright 2005, Devicescape Software, Inc.
7 * Copyright 2006-2007 Jiri Benc <jbenc@suse.cz>
8 * Copyright 2007, Michael Wu <flamingice@sourmilk.net>
9 * Copyright 2009, Johannes Berg <johannes@sipsolutions.net>
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License version 2 as
13 * published by the Free Software Foundation.
14 */
15
16#include <linux/delay.h>
17#include <linux/if_ether.h>
18#include <linux/skbuff.h>
19#include <linux/if_arp.h>
20#include <linux/etherdevice.h>
21#include <linux/crc32.h>
22#include <net/mac80211.h>
23#include <asm/unaligned.h>
24
25#include "ieee80211_i.h"
26#include "rate.h"
27
28#define IEEE80211_AUTH_TIMEOUT (HZ / 5)
29#define IEEE80211_AUTH_MAX_TRIES 3
30#define IEEE80211_ASSOC_TIMEOUT (HZ / 5)
31#define IEEE80211_ASSOC_MAX_TRIES 3
32#define IEEE80211_MAX_PROBE_TRIES 5
33
34enum work_action {
35 WORK_ACT_NONE,
36 WORK_ACT_TIMEOUT,
37 WORK_ACT_DONE,
38};
39
40
41/* utils */
42static inline void ASSERT_WORK_MTX(struct ieee80211_local *local)
43{
44 WARN_ON(!mutex_is_locked(&local->work_mtx));
45}
46
47/*
48 * We can have multiple work items (and connection probing)
49 * scheduling this timer, but we need to take care to only
50 * reschedule it when it should fire _earlier_ than it was
51 * asked for before, or if it's not pending right now. This
52 * function ensures that. Note that it then is required to
53 * run this function for all timeouts after the first one
54 * has happened -- the work that runs from this timer will
55 * do that.
56 */
57static void run_again(struct ieee80211_local *local,
58 unsigned long timeout)
59{
60 ASSERT_WORK_MTX(local);
61
62 if (!timer_pending(&local->work_timer) ||
63 time_before(timeout, local->work_timer.expires))
64 mod_timer(&local->work_timer, timeout);
65}
66
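run_again() above only rearms the shared work timer when it is not pending or when the new deadline is earlier than the one already programmed, so several work items can share a single timer without pushing each other's timeouts back. The same earliest-deadline-wins rule in a standalone form (time_before() mirrors the kernel macro's wrap-safe comparison):

#include <stdio.h>
#include <stdbool.h>

struct simple_timer {
        bool pending;
        unsigned long expires;          /* stands in for the jiffies deadline */
};

/* wrap-safe "a is before b", like the kernel's time_before() */
static bool time_before(unsigned long a, unsigned long b)
{
        return (long)(a - b) < 0;
}

static void run_again(struct simple_timer *t, unsigned long timeout)
{
        if (!t->pending || time_before(timeout, t->expires)) {
                t->expires = timeout;
                t->pending = true;
        }
}

int main(void)
{
        struct simple_timer t = { false, 0 };

        run_again(&t, 1000);    /* arms the timer */
        run_again(&t, 2000);    /* later deadline: ignored */
        run_again(&t, 500);     /* earlier deadline: timer pulled in */
        printf("expires at %lu\n", t.expires);  /* 500 */
        return 0;
}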
67static void work_free_rcu(struct rcu_head *head)
68{
69 struct ieee80211_work *wk =
70 container_of(head, struct ieee80211_work, rcu_head);
71
72 kfree(wk);
73}
74
75void free_work(struct ieee80211_work *wk)
76{
77 call_rcu(&wk->rcu_head, work_free_rcu);
78}
79
80static int ieee80211_compatible_rates(const u8 *supp_rates, int supp_rates_len,
81 struct ieee80211_supported_band *sband,
82 u32 *rates)
83{
84 int i, j, count;
85 *rates = 0;
86 count = 0;
87 for (i = 0; i < supp_rates_len; i++) {
88 int rate = (supp_rates[i] & 0x7F) * 5;
89
90 for (j = 0; j < sband->n_bitrates; j++)
91 if (sband->bitrates[j].bitrate == rate) {
92 *rates |= BIT(j);
93 count++;
94 break;
95 }
96 }
97
98 return count;
99}
100
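ieee80211_compatible_rates() above intersects the AP's advertised rate set with the hardware's bitrate table: each IE byte is a rate in 500 kbps units with bit 7 (the basic-rate flag) masked off, and every match sets the corresponding sband index in the output bitmask. A standalone version of that intersection, with an assumed hardware rate table:

#include <stdio.h>
#include <stdint.h>

/* hardware rate table in 100 kbps units (assumed example values) */
static const int hw_rates[] = { 10, 20, 55, 110, 60, 120, 240, 540 };
#define N_HW_RATES 8

/* supp as found in the AP's rates IE: 500 kbps units, bit 7 = "basic rate" */
static int compatible_rates(const uint8_t *supp, int supp_len, uint32_t *mask)
{
        int i, j, count = 0;

        *mask = 0;
        for (i = 0; i < supp_len; i++) {
                int rate = (supp[i] & 0x7f) * 5;

                for (j = 0; j < N_HW_RATES; j++) {
                        if (hw_rates[j] == rate) {
                                *mask |= 1u << j;
                                count++;
                                break;
                        }
                }
        }
        return count;
}

int main(void)
{
        /* 1, 2, 5.5, 11 (basic) and 6 Mbps */
        const uint8_t ap_rates[] = { 0x82, 0x84, 0x0b, 0x16, 0x0c };
        uint32_t mask;
        int n = compatible_rates(ap_rates, sizeof(ap_rates), &mask);

        printf("%d compatible rates, mask 0x%x\n", n, mask);    /* 5, 0x1f */
        return 0;
}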
101/* frame sending functions */
102
103static void ieee80211_add_ht_ie(struct sk_buff *skb, const u8 *ht_info_ie,
104 struct ieee80211_supported_band *sband,
105 struct ieee80211_channel *channel,
106 enum ieee80211_smps_mode smps)
107{
108 struct ieee80211_ht_info *ht_info;
109 u8 *pos;
110 u32 flags = channel->flags;
111 u16 cap = sband->ht_cap.cap;
112 __le16 tmp;
113
114 if (!sband->ht_cap.ht_supported)
115 return;
116
117 if (!ht_info_ie)
118 return;
119
120 if (ht_info_ie[1] < sizeof(struct ieee80211_ht_info))
121 return;
122
123 ht_info = (struct ieee80211_ht_info *)(ht_info_ie + 2);
124
125 /* determine capability flags */
126
127 if (ieee80211_disable_40mhz_24ghz &&
128 sband->band == IEEE80211_BAND_2GHZ) {
129 cap &= ~IEEE80211_HT_CAP_SUP_WIDTH_20_40;
130 cap &= ~IEEE80211_HT_CAP_SGI_40;
131 }
132
133 switch (ht_info->ht_param & IEEE80211_HT_PARAM_CHA_SEC_OFFSET) {
134 case IEEE80211_HT_PARAM_CHA_SEC_ABOVE:
135 if (flags & IEEE80211_CHAN_NO_HT40PLUS) {
136 cap &= ~IEEE80211_HT_CAP_SUP_WIDTH_20_40;
137 cap &= ~IEEE80211_HT_CAP_SGI_40;
138 }
139 break;
140 case IEEE80211_HT_PARAM_CHA_SEC_BELOW:
141 if (flags & IEEE80211_CHAN_NO_HT40MINUS) {
142 cap &= ~IEEE80211_HT_CAP_SUP_WIDTH_20_40;
143 cap &= ~IEEE80211_HT_CAP_SGI_40;
144 }
145 break;
146 }
147
148 /* set SM PS mode properly */
149 cap &= ~IEEE80211_HT_CAP_SM_PS;
150 switch (smps) {
151 case IEEE80211_SMPS_AUTOMATIC:
152 case IEEE80211_SMPS_NUM_MODES:
153 WARN_ON(1);
154 case IEEE80211_SMPS_OFF:
155 cap |= WLAN_HT_CAP_SM_PS_DISABLED <<
156 IEEE80211_HT_CAP_SM_PS_SHIFT;
157 break;
158 case IEEE80211_SMPS_STATIC:
159 cap |= WLAN_HT_CAP_SM_PS_STATIC <<
160 IEEE80211_HT_CAP_SM_PS_SHIFT;
161 break;
162 case IEEE80211_SMPS_DYNAMIC:
163 cap |= WLAN_HT_CAP_SM_PS_DYNAMIC <<
164 IEEE80211_HT_CAP_SM_PS_SHIFT;
165 break;
166 }
167
168 /* reserve and fill IE */
169
170 pos = skb_put(skb, sizeof(struct ieee80211_ht_cap) + 2);
171 *pos++ = WLAN_EID_HT_CAPABILITY;
172 *pos++ = sizeof(struct ieee80211_ht_cap);
173 memset(pos, 0, sizeof(struct ieee80211_ht_cap));
174
175 /* capability flags */
176 tmp = cpu_to_le16(cap);
177 memcpy(pos, &tmp, sizeof(u16));
178 pos += sizeof(u16);
179
180 /* AMPDU parameters */
181 *pos++ = sband->ht_cap.ampdu_factor |
182 (sband->ht_cap.ampdu_density <<
183 IEEE80211_HT_AMPDU_PARM_DENSITY_SHIFT);
184
185 /* MCS set */
186 memcpy(pos, &sband->ht_cap.mcs, sizeof(sband->ht_cap.mcs));
187 pos += sizeof(sband->ht_cap.mcs);
188
189 /* extended capabilities */
190 pos += sizeof(__le16);
191
192 /* BF capabilities */
193 pos += sizeof(__le32);
194
195 /* antenna selection */
196 pos += sizeof(u8);
197}
198
199static void ieee80211_send_assoc(struct ieee80211_sub_if_data *sdata,
200 struct ieee80211_work *wk)
201{
202 struct ieee80211_local *local = sdata->local;
203 struct sk_buff *skb;
204 struct ieee80211_mgmt *mgmt;
205 u8 *pos, qos_info;
206 const u8 *ies;
207 size_t offset = 0, noffset;
208 int i, len, count, rates_len, supp_rates_len;
209 u16 capab;
210 struct ieee80211_supported_band *sband;
211 u32 rates = 0;
212
213 sband = local->hw.wiphy->bands[wk->chan->band];
214
215 /*
216 * Get all rates supported by the device and the AP as
217 * some APs don't like getting a superset of their rates
218 * in the association request (e.g. D-Link DAP 1353 in
219 * b-only mode)...
220 */
221 rates_len = ieee80211_compatible_rates(wk->assoc.supp_rates,
222 wk->assoc.supp_rates_len,
223 sband, &rates);
224
225 skb = alloc_skb(local->hw.extra_tx_headroom +
226 sizeof(*mgmt) + /* bit too much but doesn't matter */
227 2 + wk->assoc.ssid_len + /* SSID */
228 4 + rates_len + /* (extended) rates */
229 4 + /* power capability */
230 2 + 2 * sband->n_channels + /* supported channels */
231 2 + sizeof(struct ieee80211_ht_cap) + /* HT */
232 wk->ie_len + /* extra IEs */
233 9, /* WMM */
234 GFP_KERNEL);
235 if (!skb) {
236 printk(KERN_DEBUG "%s: failed to allocate buffer for assoc "
237 "frame\n", sdata->name);
238 return;
239 }
240 skb_reserve(skb, local->hw.extra_tx_headroom);
241
242 capab = WLAN_CAPABILITY_ESS;
243
244 if (sband->band == IEEE80211_BAND_2GHZ) {
245 if (!(local->hw.flags & IEEE80211_HW_2GHZ_SHORT_SLOT_INCAPABLE))
246 capab |= WLAN_CAPABILITY_SHORT_SLOT_TIME;
247 if (!(local->hw.flags & IEEE80211_HW_2GHZ_SHORT_PREAMBLE_INCAPABLE))
248 capab |= WLAN_CAPABILITY_SHORT_PREAMBLE;
249 }
250
251 if (wk->assoc.capability & WLAN_CAPABILITY_PRIVACY)
252 capab |= WLAN_CAPABILITY_PRIVACY;
253
254 if ((wk->assoc.capability & WLAN_CAPABILITY_SPECTRUM_MGMT) &&
255 (local->hw.flags & IEEE80211_HW_SPECTRUM_MGMT))
256 capab |= WLAN_CAPABILITY_SPECTRUM_MGMT;
257
258 mgmt = (struct ieee80211_mgmt *) skb_put(skb, 24);
259 memset(mgmt, 0, 24);
260 memcpy(mgmt->da, wk->filter_ta, ETH_ALEN);
261 memcpy(mgmt->sa, sdata->vif.addr, ETH_ALEN);
262 memcpy(mgmt->bssid, wk->filter_ta, ETH_ALEN);
263
264 if (!is_zero_ether_addr(wk->assoc.prev_bssid)) {
265 skb_put(skb, 10);
266 mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
267 IEEE80211_STYPE_REASSOC_REQ);
268 mgmt->u.reassoc_req.capab_info = cpu_to_le16(capab);
269 mgmt->u.reassoc_req.listen_interval =
270 cpu_to_le16(local->hw.conf.listen_interval);
271 memcpy(mgmt->u.reassoc_req.current_ap, wk->assoc.prev_bssid,
272 ETH_ALEN);
273 } else {
274 skb_put(skb, 4);
275 mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
276 IEEE80211_STYPE_ASSOC_REQ);
277 mgmt->u.assoc_req.capab_info = cpu_to_le16(capab);
278 mgmt->u.assoc_req.listen_interval =
279 cpu_to_le16(local->hw.conf.listen_interval);
280 }
281
282 /* SSID */
283 ies = pos = skb_put(skb, 2 + wk->assoc.ssid_len);
284 *pos++ = WLAN_EID_SSID;
285 *pos++ = wk->assoc.ssid_len;
286 memcpy(pos, wk->assoc.ssid, wk->assoc.ssid_len);
287
288 /* add all rates which were marked to be used above */
289 supp_rates_len = rates_len;
290 if (supp_rates_len > 8)
291 supp_rates_len = 8;
292
293 len = sband->n_bitrates;
294 pos = skb_put(skb, supp_rates_len + 2);
295 *pos++ = WLAN_EID_SUPP_RATES;
296 *pos++ = supp_rates_len;
297
298 count = 0;
299 for (i = 0; i < sband->n_bitrates; i++) {
300 if (BIT(i) & rates) {
301 int rate = sband->bitrates[i].bitrate;
302 *pos++ = (u8) (rate / 5);
303 if (++count == 8)
304 break;
305 }
306 }
307
308 if (rates_len > count) {
309 pos = skb_put(skb, rates_len - count + 2);
310 *pos++ = WLAN_EID_EXT_SUPP_RATES;
311 *pos++ = rates_len - count;
312
313 for (i++; i < sband->n_bitrates; i++) {
314 if (BIT(i) & rates) {
315 int rate = sband->bitrates[i].bitrate;
316 *pos++ = (u8) (rate / 5);
317 }
318 }
319 }
320
321 if (capab & WLAN_CAPABILITY_SPECTRUM_MGMT) {
322 /* 1. power capabilities */
323 pos = skb_put(skb, 4);
324 *pos++ = WLAN_EID_PWR_CAPABILITY;
325 *pos++ = 2;
326 *pos++ = 0; /* min tx power */
327 *pos++ = wk->chan->max_power; /* max tx power */
328
329 /* 2. supported channels */
330 /* TODO: get this in reg domain format */
331 pos = skb_put(skb, 2 * sband->n_channels + 2);
332 *pos++ = WLAN_EID_SUPPORTED_CHANNELS;
333 *pos++ = 2 * sband->n_channels;
334 for (i = 0; i < sband->n_channels; i++) {
335 *pos++ = ieee80211_frequency_to_channel(
336 sband->channels[i].center_freq);
337 *pos++ = 1; /* one channel in the subband*/
338 }
339 }
340
341 /* if present, add any custom IEs that go before HT */
342 if (wk->ie_len && wk->ie) {
343 static const u8 before_ht[] = {
344 WLAN_EID_SSID,
345 WLAN_EID_SUPP_RATES,
346 WLAN_EID_EXT_SUPP_RATES,
347 WLAN_EID_PWR_CAPABILITY,
348 WLAN_EID_SUPPORTED_CHANNELS,
349 WLAN_EID_RSN,
350 WLAN_EID_QOS_CAPA,
351 WLAN_EID_RRM_ENABLED_CAPABILITIES,
352 WLAN_EID_MOBILITY_DOMAIN,
353 WLAN_EID_SUPPORTED_REGULATORY_CLASSES,
354 };
355 noffset = ieee80211_ie_split(wk->ie, wk->ie_len,
356 before_ht, ARRAY_SIZE(before_ht),
357 offset);
358 pos = skb_put(skb, noffset - offset);
359 memcpy(pos, wk->ie + offset, noffset - offset);
360 offset = noffset;
361 }
362
363 if (wk->assoc.use_11n && wk->assoc.wmm_used &&
364 local->hw.queues >= 4)
365 ieee80211_add_ht_ie(skb, wk->assoc.ht_information_ie,
366 sband, wk->chan, wk->assoc.smps);
367
368 /* if present, add any custom non-vendor IEs that go after HT */
369 if (wk->ie_len && wk->ie) {
370 noffset = ieee80211_ie_split_vendor(wk->ie, wk->ie_len,
371 offset);
372 pos = skb_put(skb, noffset - offset);
373 memcpy(pos, wk->ie + offset, noffset - offset);
374 offset = noffset;
375 }
376
377 if (wk->assoc.wmm_used && local->hw.queues >= 4) {
378 if (wk->assoc.uapsd_used) {
379 qos_info = local->uapsd_queues;
380 qos_info |= (local->uapsd_max_sp_len <<
381 IEEE80211_WMM_IE_STA_QOSINFO_SP_SHIFT);
382 } else {
383 qos_info = 0;
384 }
385
386 pos = skb_put(skb, 9);
387 *pos++ = WLAN_EID_VENDOR_SPECIFIC;
388 *pos++ = 7; /* len */
389 *pos++ = 0x00; /* Microsoft OUI 00:50:F2 */
390 *pos++ = 0x50;
391 *pos++ = 0xf2;
392 *pos++ = 2; /* WME */
393 *pos++ = 0; /* WME info */
394 *pos++ = 1; /* WME ver */
395 *pos++ = qos_info;
396 }
397
398 /* add any remaining custom (i.e. vendor specific here) IEs */
399 if (wk->ie_len && wk->ie) {
400 noffset = wk->ie_len;
401 pos = skb_put(skb, noffset - offset);
402 memcpy(pos, wk->ie + offset, noffset - offset);
403 }
404
405 IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT;
406 ieee80211_tx_skb(sdata, skb);
407}
408
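When WMM is in use, ieee80211_send_assoc() above appends the 9-byte WMM information element: vendor-specific ID, length 7, the Microsoft OUI 00:50:F2, OUI type 2 (WME), subtype 0 (information), version 1, and a QoS-info byte combining the U-APSD queue flags with the max SP length. A minimal builder matching that byte layout (the SP shift of 5 is an assumption standing in for IEEE80211_WMM_IE_STA_QOSINFO_SP_SHIFT):

#include <stdio.h>
#include <stdint.h>

#define EID_VENDOR_SPECIFIC 221
#define QOSINFO_SP_SHIFT      5 /* assumed value of the kernel's SP shift */

static size_t put_wmm_info_ie(uint8_t *pos, uint8_t uapsd_queues, uint8_t max_sp_len)
{
        uint8_t qos_info = uapsd_queues | (max_sp_len << QOSINFO_SP_SHIFT);
        const uint8_t ie[9] = {
                EID_VENDOR_SPECIFIC, 7,         /* element ID, length */
                0x00, 0x50, 0xf2,               /* Microsoft OUI */
                2,                              /* OUI type: WME */
                0,                              /* subtype: information element */
                1,                              /* version */
                qos_info,
        };
        int i;

        for (i = 0; i < 9; i++)
                pos[i] = ie[i];
        return 9;
}

int main(void)
{
        uint8_t buf[9];
        int i;

        put_wmm_info_ie(buf, 0x0f /* U-APSD on all ACs */, 0 /* deliver all */);
        for (i = 0; i < 9; i++)
                printf("%02x ", buf[i]);
        printf("\n");
        return 0;
}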
409static void ieee80211_remove_auth_bss(struct ieee80211_local *local,
410 struct ieee80211_work *wk)
411{
412 struct cfg80211_bss *cbss;
413 u16 capa_val = WLAN_CAPABILITY_ESS;
414
415 if (wk->probe_auth.privacy)
416 capa_val |= WLAN_CAPABILITY_PRIVACY;
417
418 cbss = cfg80211_get_bss(local->hw.wiphy, wk->chan, wk->filter_ta,
419 wk->probe_auth.ssid, wk->probe_auth.ssid_len,
420 WLAN_CAPABILITY_ESS | WLAN_CAPABILITY_PRIVACY,
421 capa_val);
422 if (!cbss)
423 return;
424
425 cfg80211_unlink_bss(local->hw.wiphy, cbss);
426 cfg80211_put_bss(cbss);
427}
428
429static enum work_action __must_check
430ieee80211_direct_probe(struct ieee80211_work *wk)
431{
432 struct ieee80211_sub_if_data *sdata = wk->sdata;
433 struct ieee80211_local *local = sdata->local;
434
435 wk->probe_auth.tries++;
436 if (wk->probe_auth.tries > IEEE80211_AUTH_MAX_TRIES) {
437 printk(KERN_DEBUG "%s: direct probe to %pM timed out\n",
438 sdata->name, wk->filter_ta);
439
440 /*
441 * Most likely AP is not in the range so remove the
442 * bss struct for that AP.
443 */
444 ieee80211_remove_auth_bss(local, wk);
445
446 return WORK_ACT_TIMEOUT;
447 }
448
449 printk(KERN_DEBUG "%s: direct probe to %pM (try %d)\n",
450 sdata->name, wk->filter_ta, wk->probe_auth.tries);
451
452 /*
453 * Direct probe is sent to broadcast address as some APs
454 * will not answer to direct packet in unassociated state.
455 */
456 ieee80211_send_probe_req(sdata, NULL, wk->probe_auth.ssid,
457 wk->probe_auth.ssid_len, NULL, 0);
458
459 wk->timeout = jiffies + IEEE80211_AUTH_TIMEOUT;
460 run_again(local, wk->timeout);
461
462 return WORK_ACT_NONE;
463}
464
465
466static enum work_action __must_check
467ieee80211_authenticate(struct ieee80211_work *wk)
468{
469 struct ieee80211_sub_if_data *sdata = wk->sdata;
470 struct ieee80211_local *local = sdata->local;
471
472 wk->probe_auth.tries++;
473 if (wk->probe_auth.tries > IEEE80211_AUTH_MAX_TRIES) {
474 printk(KERN_DEBUG "%s: authentication with %pM"
475 " timed out\n", sdata->name, wk->filter_ta);
476
477 /*
478 * Most likely AP is not in the range so remove the
479 * bss struct for that AP.
480 */
481 ieee80211_remove_auth_bss(local, wk);
482
483 return WORK_ACT_TIMEOUT;
484 }
485
486 printk(KERN_DEBUG "%s: authenticate with %pM (try %d)\n",
487 sdata->name, wk->filter_ta, wk->probe_auth.tries);
488
489 ieee80211_send_auth(sdata, 1, wk->probe_auth.algorithm, wk->ie,
490 wk->ie_len, wk->filter_ta, NULL, 0, 0);
491 wk->probe_auth.transaction = 2;
492
493 wk->timeout = jiffies + IEEE80211_AUTH_TIMEOUT;
494 run_again(local, wk->timeout);
495
496 return WORK_ACT_NONE;
497}
498
499static enum work_action __must_check
500ieee80211_associate(struct ieee80211_work *wk)
501{
502 struct ieee80211_sub_if_data *sdata = wk->sdata;
503 struct ieee80211_local *local = sdata->local;
504
505 wk->assoc.tries++;
506 if (wk->assoc.tries > IEEE80211_ASSOC_MAX_TRIES) {
507 printk(KERN_DEBUG "%s: association with %pM"
508 " timed out\n",
509 sdata->name, wk->filter_ta);
510
511 /*
512 * Most likely AP is not in the range so remove the
513 * bss struct for that AP.
514 */
515 if (wk->assoc.bss)
516 cfg80211_unlink_bss(local->hw.wiphy, wk->assoc.bss);
517
518 return WORK_ACT_TIMEOUT;
519 }
520
521 printk(KERN_DEBUG "%s: associate with %pM (try %d)\n",
522 sdata->name, wk->filter_ta, wk->assoc.tries);
523 ieee80211_send_assoc(sdata, wk);
524
525 wk->timeout = jiffies + IEEE80211_ASSOC_TIMEOUT;
526 run_again(local, wk->timeout);
527
528 return WORK_ACT_NONE;
529}
530
531static enum work_action __must_check
532ieee80211_remain_on_channel_timeout(struct ieee80211_work *wk)
533{
534 /*
535 * First time we run, do nothing -- the generic code will
536 * have switched to the right channel etc.
537 */
538 if (!wk->remain.started) {
539 wk->remain.started = true;
540 wk->timeout = jiffies + msecs_to_jiffies(wk->remain.duration);
541
542 cfg80211_ready_on_channel(wk->sdata->dev, (unsigned long) wk,
543 wk->chan, wk->chan_type,
544 wk->remain.duration, GFP_KERNEL);
545
546 return WORK_ACT_NONE;
547 }
548
549 return WORK_ACT_TIMEOUT;
550}
551
552static void ieee80211_auth_challenge(struct ieee80211_work *wk,
553 struct ieee80211_mgmt *mgmt,
554 size_t len)
555{
556 struct ieee80211_sub_if_data *sdata = wk->sdata;
557 u8 *pos;
558 struct ieee802_11_elems elems;
559
560 pos = mgmt->u.auth.variable;
561 ieee802_11_parse_elems(pos, len - (pos - (u8 *) mgmt), &elems);
562 if (!elems.challenge)
563 return;
564 ieee80211_send_auth(sdata, 3, wk->probe_auth.algorithm,
565 elems.challenge - 2, elems.challenge_len + 2,
566 wk->filter_ta, wk->probe_auth.key,
567 wk->probe_auth.key_len, wk->probe_auth.key_idx);
568 wk->probe_auth.transaction = 4;
569}
570
571static enum work_action __must_check
572ieee80211_rx_mgmt_auth(struct ieee80211_work *wk,
573 struct ieee80211_mgmt *mgmt, size_t len)
574{
575 u16 auth_alg, auth_transaction, status_code;
576
577 if (wk->type != IEEE80211_WORK_AUTH)
578 return WORK_ACT_NONE;
579
580 if (len < 24 + 6)
581 return WORK_ACT_NONE;
582
583 auth_alg = le16_to_cpu(mgmt->u.auth.auth_alg);
584 auth_transaction = le16_to_cpu(mgmt->u.auth.auth_transaction);
585 status_code = le16_to_cpu(mgmt->u.auth.status_code);
586
587 if (auth_alg != wk->probe_auth.algorithm ||
588 auth_transaction != wk->probe_auth.transaction)
589 return WORK_ACT_NONE;
590
591 if (status_code != WLAN_STATUS_SUCCESS) {
592 printk(KERN_DEBUG "%s: %pM denied authentication (status %d)\n",
593 wk->sdata->name, mgmt->sa, status_code);
594 return WORK_ACT_DONE;
595 }
596
597 switch (wk->probe_auth.algorithm) {
598 case WLAN_AUTH_OPEN:
599 case WLAN_AUTH_LEAP:
600 case WLAN_AUTH_FT:
601 break;
602 case WLAN_AUTH_SHARED_KEY:
603 if (wk->probe_auth.transaction != 4) {
604 ieee80211_auth_challenge(wk, mgmt, len);
605 /* need another frame */
606 return WORK_ACT_NONE;
607 }
608 break;
609 default:
610 WARN_ON(1);
611 return WORK_ACT_NONE;
612 }
613
614 printk(KERN_DEBUG "%s: authenticated\n", wk->sdata->name);
615 return WORK_ACT_DONE;
616}
617
618static enum work_action __must_check
619ieee80211_rx_mgmt_assoc_resp(struct ieee80211_work *wk,
620 struct ieee80211_mgmt *mgmt, size_t len,
621 bool reassoc)
622{
623 struct ieee80211_sub_if_data *sdata = wk->sdata;
624 struct ieee80211_local *local = sdata->local;
625 u16 capab_info, status_code, aid;
626 struct ieee802_11_elems elems;
627 u8 *pos;
628
629 /*
630 * AssocResp and ReassocResp have identical structure, so process both
631 * of them in this function.
632 */
633
634 if (len < 24 + 6)
635 return WORK_ACT_NONE;
636
637 capab_info = le16_to_cpu(mgmt->u.assoc_resp.capab_info);
638 status_code = le16_to_cpu(mgmt->u.assoc_resp.status_code);
639 aid = le16_to_cpu(mgmt->u.assoc_resp.aid);
640
641 printk(KERN_DEBUG "%s: RX %sssocResp from %pM (capab=0x%x "
642 "status=%d aid=%d)\n",
643 sdata->name, reassoc ? "Rea" : "A", mgmt->sa,
644 capab_info, status_code, (u16)(aid & ~(BIT(15) | BIT(14))));
645
646 pos = mgmt->u.assoc_resp.variable;
647 ieee802_11_parse_elems(pos, len - (pos - (u8 *) mgmt), &elems);
648
649 if (status_code == WLAN_STATUS_ASSOC_REJECTED_TEMPORARILY &&
650 elems.timeout_int && elems.timeout_int_len == 5 &&
651 elems.timeout_int[0] == WLAN_TIMEOUT_ASSOC_COMEBACK) {
652 u32 tu, ms;
653 tu = get_unaligned_le32(elems.timeout_int + 1);
654 ms = tu * 1024 / 1000;
655 printk(KERN_DEBUG "%s: %pM rejected association temporarily; "
656 "comeback duration %u TU (%u ms)\n",
657 sdata->name, mgmt->sa, tu, ms);
658 wk->timeout = jiffies + msecs_to_jiffies(ms);
659 if (ms > IEEE80211_ASSOC_TIMEOUT)
660 run_again(local, wk->timeout);
661 return WORK_ACT_NONE;
662 }
663
664 if (status_code != WLAN_STATUS_SUCCESS)
665 printk(KERN_DEBUG "%s: %pM denied association (code=%d)\n",
666 sdata->name, mgmt->sa, status_code);
667 else
668 printk(KERN_DEBUG "%s: associated\n", sdata->name);
669
670 return WORK_ACT_DONE;
671}
672
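For a temporary association rejection, the handler above reads the comeback duration from the timeout element in time units and converts it with ms = TU * 1024 / 1000, since one TU is 1024 microseconds, only stretching the retry timer when the comeback exceeds the normal association timeout. The conversion in isolation:

#include <stdio.h>
#include <stdint.h>

/* 1 TU = 1024 us, so TU -> ms is multiply by 1024, divide by 1000 */
static uint32_t tu_to_ms(uint32_t tu)
{
        return tu * 1024 / 1000;
}

int main(void)
{
        printf("%u TU = %u ms\n", 100u, (unsigned)tu_to_ms(100));    /* 102 ms */
        printf("%u TU = %u ms\n", 1000u, (unsigned)tu_to_ms(1000));  /* 1024 ms */
        return 0;
}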
673static enum work_action __must_check
674ieee80211_rx_mgmt_probe_resp(struct ieee80211_work *wk,
675 struct ieee80211_mgmt *mgmt, size_t len,
676 struct ieee80211_rx_status *rx_status)
677{
678 struct ieee80211_sub_if_data *sdata = wk->sdata;
679 struct ieee80211_local *local = sdata->local;
680 size_t baselen;
681
682 ASSERT_WORK_MTX(local);
683
684 baselen = (u8 *) mgmt->u.probe_resp.variable - (u8 *) mgmt;
685 if (baselen > len)
686 return WORK_ACT_NONE;
687
688 printk(KERN_DEBUG "%s: direct probe responded\n", sdata->name);
689 return WORK_ACT_DONE;
690}
691
692static void ieee80211_work_rx_queued_mgmt(struct ieee80211_local *local,
693 struct sk_buff *skb)
694{
695 struct ieee80211_rx_status *rx_status;
696 struct ieee80211_mgmt *mgmt;
697 struct ieee80211_work *wk;
698 enum work_action rma = WORK_ACT_NONE;
699 u16 fc;
700
701 rx_status = (struct ieee80211_rx_status *) skb->cb;
702 mgmt = (struct ieee80211_mgmt *) skb->data;
703 fc = le16_to_cpu(mgmt->frame_control);
704
705 mutex_lock(&local->work_mtx);
706
707 list_for_each_entry(wk, &local->work_list, list) {
708 const u8 *bssid = NULL;
709
710 switch (wk->type) {
711 case IEEE80211_WORK_DIRECT_PROBE:
712 case IEEE80211_WORK_AUTH:
713 case IEEE80211_WORK_ASSOC:
714 bssid = wk->filter_ta;
715 break;
716 default:
717 continue;
718 }
719
720 /*
721 * Before queuing, we already verified mgmt->sa,
722 * so this is needed just for matching.
723 */
724 if (compare_ether_addr(bssid, mgmt->bssid))
725 continue;
726
727 switch (fc & IEEE80211_FCTL_STYPE) {
728 case IEEE80211_STYPE_PROBE_RESP:
729 rma = ieee80211_rx_mgmt_probe_resp(wk, mgmt, skb->len,
730 rx_status);
731 break;
732 case IEEE80211_STYPE_AUTH:
733 rma = ieee80211_rx_mgmt_auth(wk, mgmt, skb->len);
734 break;
735 case IEEE80211_STYPE_ASSOC_RESP:
736 rma = ieee80211_rx_mgmt_assoc_resp(wk, mgmt,
737 skb->len, false);
738 break;
739 case IEEE80211_STYPE_REASSOC_RESP:
740 rma = ieee80211_rx_mgmt_assoc_resp(wk, mgmt,
741 skb->len, true);
742 break;
743 default:
744 WARN_ON(1);
745 }
746 /*
747 * We've processed this frame for that work, so it can't
748 * belong to another work struct.
749 * NB: this is also required for correctness for 'rma'!
750 */
751 break;
752 }
753
754 switch (rma) {
755 case WORK_ACT_NONE:
756 break;
757 case WORK_ACT_DONE:
758 list_del_rcu(&wk->list);
759 break;
760 default:
761 WARN(1, "unexpected: %d", rma);
762 }
763
764 mutex_unlock(&local->work_mtx);
765
766 if (rma != WORK_ACT_DONE)
767 goto out;
768
769 switch (wk->done(wk, skb)) {
770 case WORK_DONE_DESTROY:
771 free_work(wk);
772 break;
773 case WORK_DONE_REQUEUE:
774 synchronize_rcu();
775 wk->started = false; /* restart */
776 mutex_lock(&local->work_mtx);
777 list_add_tail(&wk->list, &local->work_list);
778 mutex_unlock(&local->work_mtx);
779 }
780
781 out:
782 kfree_skb(skb);
783}
784
785static void ieee80211_work_timer(unsigned long data)
786{
787 struct ieee80211_local *local = (void *) data;
788
789 if (local->quiescing)
790 return;
791
792 ieee80211_queue_work(&local->hw, &local->work_work);
793}
794
795static void ieee80211_work_work(struct work_struct *work)
796{
797 struct ieee80211_local *local =
798 container_of(work, struct ieee80211_local, work_work);
799 struct sk_buff *skb;
800 struct ieee80211_work *wk, *tmp;
801 LIST_HEAD(free_work);
802 enum work_action rma;
803 bool remain_off_channel = false;
804
805 if (local->scanning)
806 return;
807
808 /*
809 * ieee80211_queue_work() should have picked up most cases,
810 * here we'll pick the the rest.
811 */
812 if (WARN(local->suspended, "work scheduled while going to suspend\n"))
813 return;
814
815 /* first process frames to avoid timing out while a frame is pending */
816 while ((skb = skb_dequeue(&local->work_skb_queue)))
817 ieee80211_work_rx_queued_mgmt(local, skb);
818
819 ieee80211_recalc_idle(local);
820
821 mutex_lock(&local->work_mtx);
822
823 list_for_each_entry_safe(wk, tmp, &local->work_list, list) {
824 /* mark work as started if it's on the current off-channel */
825 if (!wk->started && local->tmp_channel &&
826 wk->chan == local->tmp_channel &&
827 wk->chan_type == local->tmp_channel_type) {
828 wk->started = true;
829 wk->timeout = jiffies;
830 }
831
832 if (!wk->started && !local->tmp_channel) {
833 /*
834 * TODO: could optimize this by leaving the
835 * station vifs in awake mode if they
836 * happen to be on the same channel as
837 * the requested channel
838 */
839 ieee80211_offchannel_stop_beaconing(local);
840 ieee80211_offchannel_stop_station(local);
841
842 local->tmp_channel = wk->chan;
843 local->tmp_channel_type = wk->chan_type;
844 ieee80211_hw_config(local, 0);
845 wk->started = true;
846 wk->timeout = jiffies;
847 }
848
849 /* don't try to work with items that aren't started */
850 if (!wk->started)
851 continue;
852
853 if (time_is_after_jiffies(wk->timeout)) {
854 /*
855 * This work item isn't supposed to be worked on
856 * right now, but take care to adjust the timer
857 * properly.
858 */
859 run_again(local, wk->timeout);
860 continue;
861 }
862
863 switch (wk->type) {
864 default:
865 WARN_ON(1);
866 /* nothing */
867 rma = WORK_ACT_NONE;
868 break;
869 case IEEE80211_WORK_ABORT:
870 rma = WORK_ACT_TIMEOUT;
871 case IEEE80211_WORK_DIRECT_PROBE:
872 rma = ieee80211_direct_probe(wk);
873 break;
874 case IEEE80211_WORK_AUTH:
875 rma = ieee80211_authenticate(wk);
876 break;
877 case IEEE80211_WORK_ASSOC:
878 rma = ieee80211_associate(wk);
879 break;
880 case IEEE80211_WORK_REMAIN_ON_CHANNEL:
881 rma = ieee80211_remain_on_channel_timeout(wk);
882 break;
883 }
884
885 switch (rma) {
886 case WORK_ACT_NONE:
887 /* might have changed the timeout */
888 run_again(local, wk->timeout);
889 break;
890 case WORK_ACT_TIMEOUT:
891 list_del_rcu(&wk->list);
892 synchronize_rcu();
893 list_add(&wk->list, &free_work);
894 break;
895 default:
896 WARN(1, "unexpected: %d", rma);
897 }
898 }
899
900 list_for_each_entry(wk, &local->work_list, list) {
901 if (!wk->started)
902 continue;
903 if (wk->chan != local->tmp_channel)
904 continue;
905 if (wk->chan_type != local->tmp_channel_type)
906 continue;
907 remain_off_channel = true;
908 }
909
910 if (!remain_off_channel && local->tmp_channel) {
911 local->tmp_channel = NULL;
912 ieee80211_hw_config(local, 0);
913 ieee80211_offchannel_return(local, true);
914 /* give connection some time to breathe */
915 run_again(local, jiffies + HZ/2);
916 }
917
918 if (list_empty(&local->work_list) && local->scan_req)
919 ieee80211_queue_delayed_work(&local->hw,
920 &local->scan_work,
921 round_jiffies_relative(0));
922
923 mutex_unlock(&local->work_mtx);
924
925 ieee80211_recalc_idle(local);
926
927 list_for_each_entry_safe(wk, tmp, &free_work, list) {
928 wk->done(wk, NULL);
929 list_del(&wk->list);
930 kfree(wk);
931 }
932}
933
934void ieee80211_add_work(struct ieee80211_work *wk)
935{
936 struct ieee80211_local *local;
937
938 if (WARN_ON(!wk->chan))
939 return;
940
941 if (WARN_ON(!wk->sdata))
942 return;
943
944 if (WARN_ON(!wk->done))
945 return;
946
947 if (WARN_ON(!ieee80211_sdata_running(wk->sdata)))
948 return;
949
950 wk->started = false;
951
952 local = wk->sdata->local;
953 mutex_lock(&local->work_mtx);
954 list_add_tail(&wk->list, &local->work_list);
955 mutex_unlock(&local->work_mtx);
956
957 ieee80211_queue_work(&local->hw, &local->work_work);
958}
959
960void ieee80211_work_init(struct ieee80211_local *local)
961{
962 mutex_init(&local->work_mtx);
963 INIT_LIST_HEAD(&local->work_list);
964 setup_timer(&local->work_timer, ieee80211_work_timer,
965 (unsigned long)local);
966 INIT_WORK(&local->work_work, ieee80211_work_work);
967 skb_queue_head_init(&local->work_skb_queue);
968}
969
970void ieee80211_work_purge(struct ieee80211_sub_if_data *sdata)
971{
972 struct ieee80211_local *local = sdata->local;
973 struct ieee80211_work *wk;
974
975 mutex_lock(&local->work_mtx);
976 list_for_each_entry(wk, &local->work_list, list) {
977 if (wk->sdata != sdata)
978 continue;
979 wk->type = IEEE80211_WORK_ABORT;
980 wk->started = true;
981 wk->timeout = jiffies;
982 }
983 mutex_unlock(&local->work_mtx);
984
985 /* run cleanups etc. */
986 ieee80211_work_work(&local->work_work);
987
988 mutex_lock(&local->work_mtx);
989 list_for_each_entry(wk, &local->work_list, list) {
990 if (wk->sdata != sdata)
991 continue;
992 WARN_ON(1);
993 break;
994 }
995 mutex_unlock(&local->work_mtx);
996}
997
998ieee80211_rx_result ieee80211_work_rx_mgmt(struct ieee80211_sub_if_data *sdata,
999 struct sk_buff *skb)
1000{
1001 struct ieee80211_local *local = sdata->local;
1002 struct ieee80211_mgmt *mgmt;
1003 struct ieee80211_work *wk;
1004 u16 fc;
1005
1006 if (skb->len < 24)
1007 return RX_DROP_MONITOR;
1008
1009 mgmt = (struct ieee80211_mgmt *) skb->data;
1010 fc = le16_to_cpu(mgmt->frame_control);
1011
1012 list_for_each_entry_rcu(wk, &local->work_list, list) {
1013 if (sdata != wk->sdata)
1014 continue;
1015 if (compare_ether_addr(wk->filter_ta, mgmt->sa))
1016 continue;
1017 if (compare_ether_addr(wk->filter_ta, mgmt->bssid))
1018 continue;
1019
1020 switch (fc & IEEE80211_FCTL_STYPE) {
1021 case IEEE80211_STYPE_AUTH:
1022 case IEEE80211_STYPE_PROBE_RESP:
1023 case IEEE80211_STYPE_ASSOC_RESP:
1024 case IEEE80211_STYPE_REASSOC_RESP:
1025 case IEEE80211_STYPE_DEAUTH:
1026 case IEEE80211_STYPE_DISASSOC:
1027 skb_queue_tail(&local->work_skb_queue, skb);
1028 ieee80211_queue_work(&local->hw, &local->work_work);
1029 return RX_QUEUED;
1030 }
1031 }
1032
1033 return RX_CONTINUE;
1034}
1035
1036static enum work_done_result ieee80211_remain_done(struct ieee80211_work *wk,
1037 struct sk_buff *skb)
1038{
1039 /*
1040 * We are done serving the remain-on-channel command.
1041 */
1042 cfg80211_remain_on_channel_expired(wk->sdata->dev, (unsigned long) wk,
1043 wk->chan, wk->chan_type,
1044 GFP_KERNEL);
1045
1046 return WORK_DONE_DESTROY;
1047}
1048
1049int ieee80211_wk_remain_on_channel(struct ieee80211_sub_if_data *sdata,
1050 struct ieee80211_channel *chan,
1051 enum nl80211_channel_type channel_type,
1052 unsigned int duration, u64 *cookie)
1053{
1054 struct ieee80211_work *wk;
1055
1056 wk = kzalloc(sizeof(*wk), GFP_KERNEL);
1057 if (!wk)
1058 return -ENOMEM;
1059
1060 wk->type = IEEE80211_WORK_REMAIN_ON_CHANNEL;
1061 wk->chan = chan;
1062 wk->chan_type = channel_type;
1063 wk->sdata = sdata;
1064 wk->done = ieee80211_remain_done;
1065
1066 wk->remain.duration = duration;
1067
1068 *cookie = (unsigned long) wk;
1069
1070 ieee80211_add_work(wk);
1071
1072 return 0;
1073}
1074
1075int ieee80211_wk_cancel_remain_on_channel(struct ieee80211_sub_if_data *sdata,
1076 u64 cookie)
1077{
1078 struct ieee80211_local *local = sdata->local;
1079 struct ieee80211_work *wk, *tmp;
1080 bool found = false;
1081
1082 mutex_lock(&local->work_mtx);
1083 list_for_each_entry_safe(wk, tmp, &local->work_list, list) {
1084 if ((unsigned long) wk == cookie) {
1085 wk->timeout = jiffies;
1086 found = true;
1087 break;
1088 }
1089 }
1090 mutex_unlock(&local->work_mtx);
1091
1092 if (!found)
1093 return -ENOENT;
1094
1095 ieee80211_queue_work(&local->hw, &local->work_work);
1096
1097 return 0;
1098}
diff --git a/net/netlink/genetlink.c b/net/netlink/genetlink.c
index d07ecda0a92..a4b6e148c5d 100644
--- a/net/netlink/genetlink.c
+++ b/net/netlink/genetlink.c
@@ -681,9 +681,7 @@ static int ctrl_dumpfamily(struct sk_buff *skb, struct netlink_callback *cb)
681 int chains_to_skip = cb->args[0]; 681 int chains_to_skip = cb->args[0];
682 int fams_to_skip = cb->args[1]; 682 int fams_to_skip = cb->args[1];
683 683
684 for (i = 0; i < GENL_FAM_TAB_SIZE; i++) { 684 for (i = chains_to_skip; i < GENL_FAM_TAB_SIZE; i++) {
685 if (i < chains_to_skip)
686 continue;
687 n = 0; 685 n = 0;
688 list_for_each_entry(rt, genl_family_chain(i), family_list) { 686 list_for_each_entry(rt, genl_family_chain(i), family_list) {
689 if (!rt->netnsok && !net_eq(net, &init_net)) 687 if (!rt->netnsok && !net_eq(net, &init_net))
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index f126d18dbdc..53633c5fdb1 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -2457,7 +2457,7 @@ static const struct file_operations packet_seq_fops = {
2457 2457
2458#endif 2458#endif
2459 2459
2460static int packet_net_init(struct net *net) 2460static int __net_init packet_net_init(struct net *net)
2461{ 2461{
2462 rwlock_init(&net->packet.sklist_lock); 2462 rwlock_init(&net->packet.sklist_lock);
2463 INIT_HLIST_HEAD(&net->packet.sklist); 2463 INIT_HLIST_HEAD(&net->packet.sklist);
@@ -2468,7 +2468,7 @@ static int packet_net_init(struct net *net)
2468 return 0; 2468 return 0;
2469} 2469}
2470 2470
2471static void packet_net_exit(struct net *net) 2471static void __net_exit packet_net_exit(struct net *net)
2472{ 2472{
2473 proc_net_remove(net, "packet"); 2473 proc_net_remove(net, "packet");
2474} 2474}
diff --git a/net/phonet/datagram.c b/net/phonet/datagram.c
index 67f072e94d0..387197b579b 100644
--- a/net/phonet/datagram.c
+++ b/net/phonet/datagram.c
@@ -75,7 +75,8 @@ static int pn_sendmsg(struct kiocb *iocb, struct sock *sk,
75 struct sk_buff *skb; 75 struct sk_buff *skb;
76 int err; 76 int err;
77 77
78 if (msg->msg_flags & MSG_OOB) 78 if (msg->msg_flags & ~(MSG_DONTWAIT|MSG_EOR|MSG_NOSIGNAL|
79 MSG_CMSG_COMPAT))
79 return -EOPNOTSUPP; 80 return -EOPNOTSUPP;
80 81
81 if (msg->msg_name == NULL) 82 if (msg->msg_name == NULL)
@@ -119,7 +120,8 @@ static int pn_recvmsg(struct kiocb *iocb, struct sock *sk,
119 int rval = -EOPNOTSUPP; 120 int rval = -EOPNOTSUPP;
120 int copylen; 121 int copylen;
121 122
122 if (flags & MSG_OOB) 123 if (flags & ~(MSG_PEEK|MSG_TRUNC|MSG_DONTWAIT|MSG_NOSIGNAL|
124 MSG_CMSG_COMPAT))
123 goto out_nofree; 125 goto out_nofree;
124 126
125 if (addr_len) 127 if (addr_len)
diff --git a/net/phonet/pep-gprs.c b/net/phonet/pep-gprs.c
index d183509d3fa..d01208968c8 100644
--- a/net/phonet/pep-gprs.c
+++ b/net/phonet/pep-gprs.c
@@ -96,11 +96,11 @@ static int gprs_recv(struct gprs_dev *gp, struct sk_buff *skb)
96 goto drop; 96 goto drop;
97 } 97 }
98 98
99 if (likely(skb_headroom(skb) & 3)) { 99 if (skb_headroom(skb) & 3) {
100 struct sk_buff *rskb, *fs; 100 struct sk_buff *rskb, *fs;
101 int flen = 0; 101 int flen = 0;
102 102
103 /* Phonet Pipe data header is misaligned (3 bytes), 103 /* Phonet Pipe data header may be misaligned (3 bytes),
104 * so wrap the IP packet as a single fragment of an head-less 104 * so wrap the IP packet as a single fragment of an head-less
105 * socket buffer. The network stack will pull what it needs, 105 * socket buffer. The network stack will pull what it needs,
106 * but at least, the whole IP payload is not memcpy'd. */ 106 * but at least, the whole IP payload is not memcpy'd. */
diff --git a/net/phonet/pep.c b/net/phonet/pep.c
index b6356f3832f..360cf377693 100644
--- a/net/phonet/pep.c
+++ b/net/phonet/pep.c
@@ -354,6 +354,9 @@ static int pipe_do_rcv(struct sock *sk, struct sk_buff *skb)
354 queue = &pn->ctrlreq_queue; 354 queue = &pn->ctrlreq_queue;
355 goto queue; 355 goto queue;
356 356
357 case PNS_PIPE_ALIGNED_DATA:
358 __skb_pull(skb, 1);
359 /* fall through */
357 case PNS_PIPE_DATA: 360 case PNS_PIPE_DATA:
358 __skb_pull(skb, 3); /* Pipe data header */ 361 __skb_pull(skb, 3); /* Pipe data header */
359 if (!pn_flow_safe(pn->rx_fc)) { 362 if (!pn_flow_safe(pn->rx_fc)) {
@@ -441,6 +444,7 @@ static int pep_connreq_rcv(struct sock *sk, struct sk_buff *skb)
441 struct sockaddr_pn dst; 444 struct sockaddr_pn dst;
442 u16 peer_type; 445 u16 peer_type;
443 u8 pipe_handle, enabled, n_sb; 446 u8 pipe_handle, enabled, n_sb;
447 u8 aligned = 0;
444 448
445 if (!pskb_pull(skb, sizeof(*hdr) + 4)) 449 if (!pskb_pull(skb, sizeof(*hdr) + 4))
446 return -EINVAL; 450 return -EINVAL;
@@ -479,6 +483,9 @@ static int pep_connreq_rcv(struct sock *sk, struct sk_buff *skb)
479 return -EINVAL; 483 return -EINVAL;
480 peer_type = (peer_type & 0xff00) | data[0]; 484 peer_type = (peer_type & 0xff00) | data[0];
481 break; 485 break;
486 case PN_PIPE_SB_ALIGNED_DATA:
487 aligned = data[0] != 0;
488 break;
482 } 489 }
483 n_sb--; 490 n_sb--;
484 } 491 }
@@ -510,6 +517,7 @@ static int pep_connreq_rcv(struct sock *sk, struct sk_buff *skb)
510 newpn->rx_credits = 0; 517 newpn->rx_credits = 0;
511 newpn->rx_fc = newpn->tx_fc = PN_LEGACY_FLOW_CONTROL; 518 newpn->rx_fc = newpn->tx_fc = PN_LEGACY_FLOW_CONTROL;
512 newpn->init_enable = enabled; 519 newpn->init_enable = enabled;
520 newpn->aligned = aligned;
513 521
514 BUG_ON(!skb_queue_empty(&newsk->sk_receive_queue)); 522 BUG_ON(!skb_queue_empty(&newsk->sk_receive_queue));
515 skb_queue_head(&newsk->sk_receive_queue, skb); 523 skb_queue_head(&newsk->sk_receive_queue, skb);
@@ -829,11 +837,15 @@ static int pipe_skb_send(struct sock *sk, struct sk_buff *skb)
829 return -ENOBUFS; 837 return -ENOBUFS;
830 } 838 }
831 839
832 skb_push(skb, 3); 840 skb_push(skb, 3 + pn->aligned);
833 skb_reset_transport_header(skb); 841 skb_reset_transport_header(skb);
834 ph = pnp_hdr(skb); 842 ph = pnp_hdr(skb);
835 ph->utid = 0; 843 ph->utid = 0;
836 ph->message_id = PNS_PIPE_DATA; 844 if (pn->aligned) {
845 ph->message_id = PNS_PIPE_ALIGNED_DATA;
846 ph->data[0] = 0; /* padding */
847 } else
848 ph->message_id = PNS_PIPE_DATA;
837 ph->pipe_handle = pn->pipe_handle; 849 ph->pipe_handle = pn->pipe_handle;
838 850
839 return pn_skb_send(sk, skb, &pipe_srv); 851 return pn_skb_send(sk, skb, &pipe_srv);
@@ -848,7 +860,9 @@ static int pep_sendmsg(struct kiocb *iocb, struct sock *sk,
848 int flags = msg->msg_flags; 860 int flags = msg->msg_flags;
849 int err, done; 861 int err, done;
850 862
851 if (msg->msg_flags & MSG_OOB || !(msg->msg_flags & MSG_EOR)) 863 if ((msg->msg_flags & ~(MSG_DONTWAIT|MSG_EOR|MSG_NOSIGNAL|
864 MSG_CMSG_COMPAT)) ||
865 !(msg->msg_flags & MSG_EOR))
852 return -EOPNOTSUPP; 866 return -EOPNOTSUPP;
853 867
854 skb = sock_alloc_send_skb(sk, MAX_PNPIPE_HEADER + len, 868 skb = sock_alloc_send_skb(sk, MAX_PNPIPE_HEADER + len,
@@ -927,6 +941,9 @@ int pep_write(struct sock *sk, struct sk_buff *skb)
927 struct sk_buff *rskb, *fs; 941 struct sk_buff *rskb, *fs;
928 int flen = 0; 942 int flen = 0;
929 943
944 if (pep_sk(sk)->aligned)
945 return pipe_skb_send(sk, skb);
946
930 rskb = alloc_skb(MAX_PNPIPE_HEADER, GFP_ATOMIC); 947 rskb = alloc_skb(MAX_PNPIPE_HEADER, GFP_ATOMIC);
931 if (!rskb) { 948 if (!rskb) {
932 kfree_skb(skb); 949 kfree_skb(skb);
@@ -966,6 +983,10 @@ static int pep_recvmsg(struct kiocb *iocb, struct sock *sk,
966 struct sk_buff *skb; 983 struct sk_buff *skb;
967 int err; 984 int err;
968 985
986 if (flags & ~(MSG_OOB|MSG_PEEK|MSG_TRUNC|MSG_DONTWAIT|MSG_WAITALL|
987 MSG_NOSIGNAL|MSG_CMSG_COMPAT))
988 return -EOPNOTSUPP;
989
969 if (unlikely(1 << sk->sk_state & (TCPF_LISTEN | TCPF_CLOSE))) 990 if (unlikely(1 << sk->sk_state & (TCPF_LISTEN | TCPF_CLOSE)))
970 return -ENOTCONN; 991 return -ENOTCONN;
971 992
@@ -973,6 +994,8 @@ static int pep_recvmsg(struct kiocb *iocb, struct sock *sk,
973 /* Dequeue and acknowledge control request */ 994 /* Dequeue and acknowledge control request */
974 struct pep_sock *pn = pep_sk(sk); 995 struct pep_sock *pn = pep_sk(sk);
975 996
997 if (flags & MSG_PEEK)
998 return -EOPNOTSUPP;
976 skb = skb_dequeue(&pn->ctrlreq_queue); 999 skb = skb_dequeue(&pn->ctrlreq_queue);
977 if (skb) { 1000 if (skb) {
978 pep_ctrlreq_error(sk, skb, PN_PIPE_NO_ERROR, 1001 pep_ctrlreq_error(sk, skb, PN_PIPE_NO_ERROR,
diff --git a/net/phonet/pn_dev.c b/net/phonet/pn_dev.c
index bc4a33bf2d3..c597cc53a6f 100644
--- a/net/phonet/pn_dev.c
+++ b/net/phonet/pn_dev.c
@@ -311,7 +311,7 @@ static struct notifier_block phonet_device_notifier = {
311}; 311};
312 312
313/* Per-namespace Phonet devices handling */ 313/* Per-namespace Phonet devices handling */
314static int phonet_init_net(struct net *net) 314static int __net_init phonet_init_net(struct net *net)
315{ 315{
316 struct phonet_net *pnn = net_generic(net, phonet_net_id); 316 struct phonet_net *pnn = net_generic(net, phonet_net_id);
317 317
@@ -324,7 +324,7 @@ static int phonet_init_net(struct net *net)
324 return 0; 324 return 0;
325} 325}
326 326
327static void phonet_exit_net(struct net *net) 327static void __net_exit phonet_exit_net(struct net *net)
328{ 328{
329 struct phonet_net *pnn = net_generic(net, phonet_net_id); 329 struct phonet_net *pnn = net_generic(net, phonet_net_id);
330 struct net_device *dev; 330 struct net_device *dev;
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
index 75fd1c672c6..6cd491013b5 100644
--- a/net/sched/sch_api.c
+++ b/net/sched/sch_api.c
@@ -1707,6 +1707,7 @@ static int __init pktsched_init(void)
1707{ 1707{
1708 register_qdisc(&pfifo_qdisc_ops); 1708 register_qdisc(&pfifo_qdisc_ops);
1709 register_qdisc(&bfifo_qdisc_ops); 1709 register_qdisc(&bfifo_qdisc_ops);
1710 register_qdisc(&pfifo_head_drop_qdisc_ops);
1710 register_qdisc(&mq_qdisc_ops); 1711 register_qdisc(&mq_qdisc_ops);
1711 proc_net_fops_create(&init_net, "psched", 0, &psched_fops); 1712 proc_net_fops_create(&init_net, "psched", 0, &psched_fops);
1712 1713
diff --git a/net/sched/sch_fifo.c b/net/sched/sch_fifo.c
index 69188e8358b..4b0a6cc44c7 100644
--- a/net/sched/sch_fifo.c
+++ b/net/sched/sch_fifo.c
@@ -43,6 +43,26 @@ static int pfifo_enqueue(struct sk_buff *skb, struct Qdisc* sch)
43 return qdisc_reshape_fail(skb, sch); 43 return qdisc_reshape_fail(skb, sch);
44} 44}
45 45
46static int pfifo_tail_enqueue(struct sk_buff *skb, struct Qdisc* sch)
47{
48 struct sk_buff *skb_head;
49 struct fifo_sched_data *q = qdisc_priv(sch);
50
51 if (likely(skb_queue_len(&sch->q) < q->limit))
52 return qdisc_enqueue_tail(skb, sch);
53
54 /* queue full, remove one skb to fulfill the limit */
55 skb_head = qdisc_dequeue_head(sch);
56 sch->bstats.bytes -= qdisc_pkt_len(skb_head);
57 sch->bstats.packets--;
58 sch->qstats.drops++;
59 kfree_skb(skb_head);
60
61 qdisc_enqueue_tail(skb, sch);
62
63 return NET_XMIT_CN;
64}
65
46static int fifo_init(struct Qdisc *sch, struct nlattr *opt) 66static int fifo_init(struct Qdisc *sch, struct nlattr *opt)
47{ 67{
48 struct fifo_sched_data *q = qdisc_priv(sch); 68 struct fifo_sched_data *q = qdisc_priv(sch);
@@ -108,6 +128,20 @@ struct Qdisc_ops bfifo_qdisc_ops __read_mostly = {
108}; 128};
109EXPORT_SYMBOL(bfifo_qdisc_ops); 129EXPORT_SYMBOL(bfifo_qdisc_ops);
110 130
131struct Qdisc_ops pfifo_head_drop_qdisc_ops __read_mostly = {
132 .id = "pfifo_head_drop",
133 .priv_size = sizeof(struct fifo_sched_data),
134 .enqueue = pfifo_tail_enqueue,
135 .dequeue = qdisc_dequeue_head,
136 .peek = qdisc_peek_head,
137 .drop = qdisc_queue_drop_head,
138 .init = fifo_init,
139 .reset = qdisc_reset_queue,
140 .change = fifo_init,
141 .dump = fifo_dump,
142 .owner = THIS_MODULE,
143};
144
111/* Pass size change message down to embedded FIFO */ 145/* Pass size change message down to embedded FIFO */
112int fifo_set_limit(struct Qdisc *q, unsigned int limit) 146int fifo_set_limit(struct Qdisc *q, unsigned int limit)
113{ 147{
diff --git a/net/sctp/proc.c b/net/sctp/proc.c
index d093cbfeaac..a5ac6e0a8d9 100644
--- a/net/sctp/proc.c
+++ b/net/sctp/proc.c
@@ -40,7 +40,7 @@
40#include <net/sctp/sctp.h> 40#include <net/sctp/sctp.h>
41#include <net/ip.h> /* for snmp_fold_field */ 41#include <net/ip.h> /* for snmp_fold_field */
42 42
43static struct snmp_mib sctp_snmp_list[] = { 43static const struct snmp_mib sctp_snmp_list[] = {
44 SNMP_MIB_ITEM("SctpCurrEstab", SCTP_MIB_CURRESTAB), 44 SNMP_MIB_ITEM("SctpCurrEstab", SCTP_MIB_CURRESTAB),
45 SNMP_MIB_ITEM("SctpActiveEstabs", SCTP_MIB_ACTIVEESTABS), 45 SNMP_MIB_ITEM("SctpActiveEstabs", SCTP_MIB_ACTIVEESTABS),
46 SNMP_MIB_ITEM("SctpPassiveEstabs", SCTP_MIB_PASSIVEESTABS), 46 SNMP_MIB_ITEM("SctpPassiveEstabs", SCTP_MIB_PASSIVEESTABS),
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index 67fdac9d2d3..f6d1e59c415 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -6359,7 +6359,7 @@ void sctp_copy_sock(struct sock *newsk, struct sock *sk,
6359 struct sctp_association *asoc) 6359 struct sctp_association *asoc)
6360{ 6360{
6361 struct inet_sock *inet = inet_sk(sk); 6361 struct inet_sock *inet = inet_sk(sk);
6362 struct inet_sock *newinet = inet_sk(newsk); 6362 struct inet_sock *newinet;
6363 6363
6364 newsk->sk_type = sk->sk_type; 6364 newsk->sk_type = sk->sk_type;
6365 newsk->sk_bound_dev_if = sk->sk_bound_dev_if; 6365 newsk->sk_bound_dev_if = sk->sk_bound_dev_if;
diff --git a/net/sysctl_net.c b/net/sysctl_net.c
index 0b15d7250c4..53196009160 100644
--- a/net/sysctl_net.c
+++ b/net/sysctl_net.c
@@ -71,7 +71,7 @@ static struct ctl_table_root net_sysctl_ro_root = {
71 .permissions = net_ctl_ro_header_perms, 71 .permissions = net_ctl_ro_header_perms,
72}; 72};
73 73
74static int sysctl_net_init(struct net *net) 74static int __net_init sysctl_net_init(struct net *net)
75{ 75{
76 setup_sysctl_set(&net->sysctls, 76 setup_sysctl_set(&net->sysctls,
77 &net_sysctl_ro_root.default_set, 77 &net_sysctl_ro_root.default_set,
@@ -79,7 +79,7 @@ static int sysctl_net_init(struct net *net)
79 return 0; 79 return 0;
80} 80}
81 81
82static void sysctl_net_exit(struct net *net) 82static void __net_exit sysctl_net_exit(struct net *net)
83{ 83{
84 WARN_ON(!list_empty(&net->sysctls.list)); 84 WARN_ON(!list_empty(&net->sysctls.list));
85 return; 85 return;
diff --git a/net/tipc/Kconfig b/net/tipc/Kconfig
index 3b30d1130b6..b74f78d0c03 100644
--- a/net/tipc/Kconfig
+++ b/net/tipc/Kconfig
@@ -10,7 +10,7 @@ menuconfig TIPC
10 specially designed for intra cluster communication. This protocol 10 specially designed for intra cluster communication. This protocol
11 originates from Ericsson where it has been used in carrier grade 11 originates from Ericsson where it has been used in carrier grade
12 cluster applications for many years. 12 cluster applications for many years.
13 13
14 For more information about TIPC, see http://tipc.sourceforge.net. 14 For more information about TIPC, see http://tipc.sourceforge.net.
15 15
16 This protocol support is also available as a module ( = code which 16 This protocol support is also available as a module ( = code which
@@ -23,91 +23,76 @@ menuconfig TIPC
23if TIPC 23if TIPC
24 24
25config TIPC_ADVANCED 25config TIPC_ADVANCED
26 bool "TIPC: Advanced configuration" 26 bool "Advanced TIPC configuration"
27 default n 27 default n
28 help 28 help
29 Saying Y here will open some advanced configuration 29 Saying Y here will open some advanced configuration for TIPC.
30 for TIPC. Most users do not need to bother, so if 30 Most users do not need to bother; if unsure, just say N.
31 unsure, just say N.
32 31
33config TIPC_ZONES 32config TIPC_ZONES
34 int "Maximum number of zones in network" 33 int "Maximum number of zones in a network"
35 depends on TIPC_ADVANCED 34 depends on TIPC_ADVANCED
35 range 1 255
36 default "3" 36 default "3"
37 help 37 help
38 Max number of zones inside TIPC network. Max supported value 38 Specifies how many zones can be supported in a TIPC network.
39 is 255 zones, minimum is 1 39 Can range from 1 to 255 zones; default is 3.
40 40
41 Default is 3 zones in a network; setting this to higher 41 Setting this to a smaller value saves some memory;
42 allows more zones but might use more memory. 42 setting it to a higher value allows for more zones.
43 43
44config TIPC_CLUSTERS 44config TIPC_CLUSTERS
45 int "Maximum number of clusters in a zone" 45 int "Maximum number of clusters in a zone"
46 depends on TIPC_ADVANCED 46 depends on TIPC_ADVANCED
47 range 1 1
47 default "1" 48 default "1"
48 help 49 help
49 ***Only 1 (one cluster in a zone) is supported by current code. 50 Specifies how many clusters can be supported in a TIPC zone.
50 Any value set here will be overridden.***
51
52 (Max number of clusters inside TIPC zone. Max supported
53 value is 4095 clusters, minimum is 1.
54 51
55 Default is 1; setting this to smaller value might save 52 *** Currently TIPC only supports a single cluster per zone. ***
56 some memory, setting it to higher
57 allows more clusters and might consume more memory.)
58 53
59config TIPC_NODES 54config TIPC_NODES
60 int "Maximum number of nodes in cluster" 55 int "Maximum number of nodes in a cluster"
61 depends on TIPC_ADVANCED 56 depends on TIPC_ADVANCED
57 range 8 2047
62 default "255" 58 default "255"
63 help 59 help
64 Maximum number of nodes inside a TIPC cluster. Maximum 60 Specifies how many nodes can be supported in a TIPC cluster.
65 supported value is 2047 nodes, minimum is 8. 61 Can range from 8 to 2047 nodes; default is 255.
66
67 Setting this to a smaller value saves some memory,
68 setting it to higher allows more nodes.
69
70config TIPC_SLAVE_NODES
71 int "Maximum number of slave nodes in cluster"
72 depends on TIPC_ADVANCED
73 default "0"
74 help
75 ***This capability is not supported by current code.***
76
77 Maximum number of slave nodes inside a TIPC cluster. Maximum
78 supported value is 2047 nodes, minimum is 0.
79 62
80 Setting this to a smaller value saves some memory, 63 Setting this to a smaller value saves some memory;
 81 setting it to higher allows more nodes. 64 setting it to a higher value allows for more nodes.
82 65
83config TIPC_PORTS 66config TIPC_PORTS
84 int "Maximum number of ports in a node" 67 int "Maximum number of ports in a node"
85 depends on TIPC_ADVANCED 68 depends on TIPC_ADVANCED
69 range 127 65535
86 default "8191" 70 default "8191"
87 help 71 help
88 Maximum number of ports within a node. Maximum 72 Specifies how many ports can be supported by a node.
89 supported value is 64535 nodes, minimum is 127. 73 Can range from 127 to 65535 ports; default is 8191.
90 74
91 Setting this to a smaller value saves some memory, 75 Setting this to a smaller value saves some memory,
 92 setting it to higher allows more ports. 76 setting it to a higher value allows for more ports.
93 77
94config TIPC_LOG 78config TIPC_LOG
95 int "Size of log buffer" 79 int "Size of log buffer"
96 depends on TIPC_ADVANCED 80 depends on TIPC_ADVANCED
97 default 0 81 range 0 32768
82 default "0"
98 help 83 help
99 Size (in bytes) of TIPC's internal log buffer, which records the 84 Size (in bytes) of TIPC's internal log buffer, which records the
100 occurrence of significant events. Maximum supported value 85 occurrence of significant events. Can range from 0 to 32768 bytes;
101 is 32768 bytes, minimum is 0. 86 default is 0.
102 87
103 There is no need to enable the log buffer unless the node will be 88 There is no need to enable the log buffer unless the node will be
104 managed remotely via TIPC. 89 managed remotely via TIPC.
105 90
106config TIPC_DEBUG 91config TIPC_DEBUG
107 bool "Enable debugging support" 92 bool "Enable debug messages"
108 default n 93 default n
109 help 94 help
110 This will enable debugging of TIPC. 95 This enables debugging of TIPC.
111 96
112 Only say Y here if you are having trouble with TIPC. It will 97 Only say Y here if you are having trouble with TIPC. It will
113 enable the display of detailed information about what is going on. 98 enable the display of detailed information about what is going on.
diff --git a/net/tipc/core.c b/net/tipc/core.c
index 3256bd7d398..52c571fedbe 100644
--- a/net/tipc/core.c
+++ b/net/tipc/core.c
@@ -189,11 +189,11 @@ static int __init tipc_init(void)
189 tipc_remote_management = 1; 189 tipc_remote_management = 1;
190 tipc_max_publications = 10000; 190 tipc_max_publications = 10000;
191 tipc_max_subscriptions = 2000; 191 tipc_max_subscriptions = 2000;
192 tipc_max_ports = delimit(CONFIG_TIPC_PORTS, 127, 65536); 192 tipc_max_ports = CONFIG_TIPC_PORTS;
193 tipc_max_zones = delimit(CONFIG_TIPC_ZONES, 1, 255); 193 tipc_max_zones = CONFIG_TIPC_ZONES;
194 tipc_max_clusters = delimit(CONFIG_TIPC_CLUSTERS, 1, 1); 194 tipc_max_clusters = CONFIG_TIPC_CLUSTERS;
195 tipc_max_nodes = delimit(CONFIG_TIPC_NODES, 8, 2047); 195 tipc_max_nodes = CONFIG_TIPC_NODES;
196 tipc_max_slaves = delimit(CONFIG_TIPC_SLAVE_NODES, 0, 2047); 196 tipc_max_slaves = CONFIG_TIPC_SLAVE_NODES;
197 tipc_net_id = 4711; 197 tipc_net_id = 4711;
198 198
199 if ((res = tipc_core_start())) 199 if ((res = tipc_core_start()))
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index f2551190311..9bc9b92bc09 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -2224,7 +2224,7 @@ static const struct net_proto_family unix_family_ops = {
2224}; 2224};
2225 2225
2226 2226
2227static int unix_net_init(struct net *net) 2227static int __net_init unix_net_init(struct net *net)
2228{ 2228{
2229 int error = -ENOMEM; 2229 int error = -ENOMEM;
2230 2230
@@ -2243,7 +2243,7 @@ out:
2243 return error; 2243 return error;
2244} 2244}
2245 2245
2246static void unix_net_exit(struct net *net) 2246static void __net_exit unix_net_exit(struct net *net)
2247{ 2247{
2248 unix_sysctl_unregister(net); 2248 unix_sysctl_unregister(net);
2249 proc_net_remove(net, "unix"); 2249 proc_net_remove(net, "unix");
diff --git a/net/unix/sysctl_net_unix.c b/net/unix/sysctl_net_unix.c
index 708f5df6b7f..d095c7be10d 100644
--- a/net/unix/sysctl_net_unix.c
+++ b/net/unix/sysctl_net_unix.c
@@ -31,7 +31,7 @@ static struct ctl_path unix_path[] = {
31 { }, 31 { },
32}; 32};
33 33
34int unix_sysctl_register(struct net *net) 34int __net_init unix_sysctl_register(struct net *net)
35{ 35{
36 struct ctl_table *table; 36 struct ctl_table *table;
37 37
diff --git a/net/wireless/.gitignore b/net/wireless/.gitignore
new file mode 100644
index 00000000000..c33451b896d
--- /dev/null
+++ b/net/wireless/.gitignore
@@ -0,0 +1 @@
regdb.c
diff --git a/net/wireless/Kconfig b/net/wireless/Kconfig
index 90e93a5701a..d0ee29063e5 100644
--- a/net/wireless/Kconfig
+++ b/net/wireless/Kconfig
@@ -94,20 +94,21 @@ config CFG80211_DEBUGFS
94 94
95 If unsure, say N. 95 If unsure, say N.
96 96
97config WIRELESS_OLD_REGULATORY 97config CFG80211_INTERNAL_REGDB
98 bool "Old wireless static regulatory definitions" 98 bool "use statically compiled regulatory rules database" if EMBEDDED
99 default n 99 default n
100 depends on CFG80211 100 depends on CFG80211
101 ---help--- 101 ---help---
102 This option enables the old static regulatory information 102 This option generates an internal data structure representing
103 and uses it within the new framework. This option is available 103 the wireless regulatory rules described in net/wireless/db.txt
104 for historical reasons and it is advised to leave it off. 104 and includes code to query that database. This is an alternative
105 to using CRDA for defining regulatory rules for the kernel.
105 106
106 For details see: 107 For details see:
107 108
108 http://wireless.kernel.org/en/developers/Regulatory 109 http://wireless.kernel.org/en/developers/Regulatory
109 110
110 Say N and if you say Y, please tell us why. The default is N. 111 Most distributions have a CRDA package. So if unsure, say N.
111 112
112config CFG80211_WEXT 113config CFG80211_WEXT
113 bool "cfg80211 wireless extensions compatibility" 114 bool "cfg80211 wireless extensions compatibility"
diff --git a/net/wireless/Makefile b/net/wireless/Makefile
index f07c8dc7aab..e77e508126f 100644
--- a/net/wireless/Makefile
+++ b/net/wireless/Makefile
@@ -13,5 +13,11 @@ cfg80211-y += core.o sysfs.o radiotap.o util.o reg.o scan.o nl80211.o
13cfg80211-y += mlme.o ibss.o sme.o chan.o ethtool.o 13cfg80211-y += mlme.o ibss.o sme.o chan.o ethtool.o
14cfg80211-$(CONFIG_CFG80211_DEBUGFS) += debugfs.o 14cfg80211-$(CONFIG_CFG80211_DEBUGFS) += debugfs.o
15cfg80211-$(CONFIG_CFG80211_WEXT) += wext-compat.o wext-sme.o 15cfg80211-$(CONFIG_CFG80211_WEXT) += wext-compat.o wext-sme.o
16cfg80211-$(CONFIG_CFG80211_INTERNAL_REGDB) += regdb.o
16 17
17ccflags-y += -D__CHECK_ENDIAN__ 18ccflags-y += -D__CHECK_ENDIAN__
19
20$(obj)/regdb.c: $(src)/db.txt $(src)/genregdb.awk
21 @$(AWK) -f $(srctree)/$(src)/genregdb.awk < $< > $@
22
23clean-files := regdb.c
diff --git a/net/wireless/chan.c b/net/wireless/chan.c
index a46ac6c9b36..bf1737fc9a7 100644
--- a/net/wireless/chan.c
+++ b/net/wireless/chan.c
@@ -41,44 +41,57 @@ rdev_fixed_channel(struct cfg80211_registered_device *rdev,
41 return result; 41 return result;
42} 42}
43 43
44int rdev_set_freq(struct cfg80211_registered_device *rdev, 44struct ieee80211_channel *
45 struct wireless_dev *for_wdev, 45rdev_freq_to_chan(struct cfg80211_registered_device *rdev,
46 int freq, enum nl80211_channel_type channel_type) 46 int freq, enum nl80211_channel_type channel_type)
47{ 47{
48 struct ieee80211_channel *chan; 48 struct ieee80211_channel *chan;
49 struct ieee80211_sta_ht_cap *ht_cap; 49 struct ieee80211_sta_ht_cap *ht_cap;
50 int result;
51
52 if (rdev_fixed_channel(rdev, for_wdev))
53 return -EBUSY;
54
55 if (!rdev->ops->set_channel)
56 return -EOPNOTSUPP;
57 50
58 chan = ieee80211_get_channel(&rdev->wiphy, freq); 51 chan = ieee80211_get_channel(&rdev->wiphy, freq);
59 52
60 /* Primary channel not allowed */ 53 /* Primary channel not allowed */
61 if (!chan || chan->flags & IEEE80211_CHAN_DISABLED) 54 if (!chan || chan->flags & IEEE80211_CHAN_DISABLED)
62 return -EINVAL; 55 return NULL;
63 56
64 if (channel_type == NL80211_CHAN_HT40MINUS && 57 if (channel_type == NL80211_CHAN_HT40MINUS &&
65 chan->flags & IEEE80211_CHAN_NO_HT40MINUS) 58 chan->flags & IEEE80211_CHAN_NO_HT40MINUS)
66 return -EINVAL; 59 return NULL;
67 else if (channel_type == NL80211_CHAN_HT40PLUS && 60 else if (channel_type == NL80211_CHAN_HT40PLUS &&
68 chan->flags & IEEE80211_CHAN_NO_HT40PLUS) 61 chan->flags & IEEE80211_CHAN_NO_HT40PLUS)
69 return -EINVAL; 62 return NULL;
70 63
71 ht_cap = &rdev->wiphy.bands[chan->band]->ht_cap; 64 ht_cap = &rdev->wiphy.bands[chan->band]->ht_cap;
72 65
73 if (channel_type != NL80211_CHAN_NO_HT) { 66 if (channel_type != NL80211_CHAN_NO_HT) {
74 if (!ht_cap->ht_supported) 67 if (!ht_cap->ht_supported)
75 return -EINVAL; 68 return NULL;
76 69
77 if (!(ht_cap->cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40) || 70 if (!(ht_cap->cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40) ||
78 ht_cap->cap & IEEE80211_HT_CAP_40MHZ_INTOLERANT) 71 ht_cap->cap & IEEE80211_HT_CAP_40MHZ_INTOLERANT)
79 return -EINVAL; 72 return NULL;
80 } 73 }
81 74
75 return chan;
76}
77
78int rdev_set_freq(struct cfg80211_registered_device *rdev,
79 struct wireless_dev *for_wdev,
80 int freq, enum nl80211_channel_type channel_type)
81{
82 struct ieee80211_channel *chan;
83 int result;
84
85 if (rdev_fixed_channel(rdev, for_wdev))
86 return -EBUSY;
87
88 if (!rdev->ops->set_channel)
89 return -EOPNOTSUPP;
90
91 chan = rdev_freq_to_chan(rdev, freq, channel_type);
92 if (!chan)
93 return -EINVAL;
94
82 result = rdev->ops->set_channel(&rdev->wiphy, chan, channel_type); 95 result = rdev->ops->set_channel(&rdev->wiphy, chan, channel_type);
83 if (result) 96 if (result)
84 return result; 97 return result;
diff --git a/net/wireless/core.c b/net/wireless/core.c
index 92b81244248..20db90246de 100644
--- a/net/wireless/core.c
+++ b/net/wireless/core.c
@@ -402,6 +402,7 @@ struct wiphy *wiphy_new(const struct cfg80211_ops *ops, int sizeof_priv)
402 rdev->wiphy.retry_long = 4; 402 rdev->wiphy.retry_long = 4;
403 rdev->wiphy.frag_threshold = (u32) -1; 403 rdev->wiphy.frag_threshold = (u32) -1;
404 rdev->wiphy.rts_threshold = (u32) -1; 404 rdev->wiphy.rts_threshold = (u32) -1;
405 rdev->wiphy.coverage_class = 0;
405 406
406 return &rdev->wiphy; 407 return &rdev->wiphy;
407} 408}
diff --git a/net/wireless/core.h b/net/wireless/core.h
index 4ef3efc9410..2d6a6b9c0c4 100644
--- a/net/wireless/core.h
+++ b/net/wireless/core.h
@@ -111,7 +111,8 @@ struct cfg80211_internal_bss {
111 unsigned long ts; 111 unsigned long ts;
112 struct kref ref; 112 struct kref ref;
113 atomic_t hold; 113 atomic_t hold;
114 bool ies_allocated; 114 bool beacon_ies_allocated;
115 bool proberesp_ies_allocated;
115 116
116 /* must be last because of priv member */ 117 /* must be last because of priv member */
117 struct cfg80211_bss pub; 118 struct cfg80211_bss pub;
@@ -374,10 +375,15 @@ void cfg80211_process_rdev_events(struct cfg80211_registered_device *rdev);
374struct ieee80211_channel * 375struct ieee80211_channel *
375rdev_fixed_channel(struct cfg80211_registered_device *rdev, 376rdev_fixed_channel(struct cfg80211_registered_device *rdev,
376 struct wireless_dev *for_wdev); 377 struct wireless_dev *for_wdev);
378struct ieee80211_channel *
379rdev_freq_to_chan(struct cfg80211_registered_device *rdev,
380 int freq, enum nl80211_channel_type channel_type);
377int rdev_set_freq(struct cfg80211_registered_device *rdev, 381int rdev_set_freq(struct cfg80211_registered_device *rdev,
378 struct wireless_dev *for_wdev, 382 struct wireless_dev *for_wdev,
379 int freq, enum nl80211_channel_type channel_type); 383 int freq, enum nl80211_channel_type channel_type);
380 384
385u16 cfg80211_calculate_bitrate(struct rate_info *rate);
386
381#ifdef CONFIG_CFG80211_DEVELOPER_WARNINGS 387#ifdef CONFIG_CFG80211_DEVELOPER_WARNINGS
382#define CFG80211_DEV_WARN_ON(cond) WARN_ON(cond) 388#define CFG80211_DEV_WARN_ON(cond) WARN_ON(cond)
383#else 389#else
diff --git a/net/wireless/db.txt b/net/wireless/db.txt
new file mode 100644
index 00000000000..a2fc3a09ccd
--- /dev/null
+++ b/net/wireless/db.txt
@@ -0,0 +1,17 @@
1#
2# This file is a placeholder to prevent accidental build breakage if someone
3# enables CONFIG_CFG80211_INTERNAL_REGDB. Almost no one actually needs to
4# enable that build option.
5#
6# You should be using CRDA instead. It is even better if you use the CRDA
7# package provided by your distribution, since they will probably keep it
8# up-to-date on your behalf.
9#
10# If you _really_ intend to use CONFIG_CFG80211_INTERNAL_REGDB then you will
11# need to replace this file with one containing appropriately formatted
12# regulatory rules that cover the regulatory domains you will be using. Your
13# best option is to extract the db.txt file from the wireless-regdb git
14# repository:
15#
16# git://git.kernel.org/pub/scm/linux/kernel/git/linville/wireless-regdb.git
17#
diff --git a/net/wireless/genregdb.awk b/net/wireless/genregdb.awk
new file mode 100644
index 00000000000..3cc9e69880a
--- /dev/null
+++ b/net/wireless/genregdb.awk
@@ -0,0 +1,118 @@
1#!/usr/bin/awk -f
2#
3# genregdb.awk -- generate regdb.c from db.txt
4#
5# Actually, it reads from stdin (presumed to be db.txt) and writes
6# to stdout (presumed to be regdb.c), but close enough...
7#
8# Copyright 2009 John W. Linville <linville@tuxdriver.com>
9#
10# This program is free software; you can redistribute it and/or modify
11# it under the terms of the GNU General Public License version 2 as
12# published by the Free Software Foundation.
13#
14
15BEGIN {
16 active = 0
17 rules = 0;
18 print "/*"
19 print " * DO NOT EDIT -- file generated from data in db.txt"
20 print " */"
21 print ""
22 print "#include <linux/nl80211.h>"
23 print "#include <net/cfg80211.h>"
24 print ""
25 regdb = "const struct ieee80211_regdomain *reg_regdb[] = {\n"
26}
27
28/^[ \t]*#/ {
29 # Ignore
30}
31
32!active && /^[ \t]*$/ {
33 # Ignore
34}
35
36!active && /country/ {
37 country=$2
38 sub(/:/, "", country)
39 printf "static const struct ieee80211_regdomain regdom_%s = {\n", country
40 printf "\t.alpha2 = \"%s\",\n", country
41 printf "\t.reg_rules = {\n"
42 active = 1
43 regdb = regdb "\t&regdom_" country ",\n"
44}
45
46active && /^[ \t]*\(/ {
47 start = $1
48 sub(/\(/, "", start)
49 end = $3
50 bw = $5
51 sub(/\),/, "", bw)
52 gain = $6
53 sub(/\(/, "", gain)
54 sub(/,/, "", gain)
55 power = $7
56 sub(/\)/, "", power)
57 sub(/,/, "", power)
58 # power might be in mW...
59 units = $8
60 sub(/\)/, "", units)
61 sub(/,/, "", units)
62 if (units == "mW") {
63 if (power == 100) {
64 power = 20
65 } else if (power == 200) {
66 power = 23
67 } else if (power == 500) {
68 power = 27
69 } else if (power == 1000) {
70 power = 30
71 } else {
72 print "Unknown power value in database!"
73 }
74 }
75 flagstr = ""
76 for (i=8; i<=NF; i++)
77 flagstr = flagstr $i
78 split(flagstr, flagarray, ",")
79 flags = ""
80 for (arg in flagarray) {
81 if (flagarray[arg] == "NO-OFDM") {
82 flags = flags "\n\t\t\tNL80211_RRF_NO_OFDM | "
83 } else if (flagarray[arg] == "NO-CCK") {
84 flags = flags "\n\t\t\tNL80211_RRF_NO_CCK | "
85 } else if (flagarray[arg] == "NO-INDOOR") {
86 flags = flags "\n\t\t\tNL80211_RRF_NO_INDOOR | "
87 } else if (flagarray[arg] == "NO-OUTDOOR") {
88 flags = flags "\n\t\t\tNL80211_RRF_NO_OUTDOOR | "
89 } else if (flagarray[arg] == "DFS") {
90 flags = flags "\n\t\t\tNL80211_RRF_DFS | "
91 } else if (flagarray[arg] == "PTP-ONLY") {
92 flags = flags "\n\t\t\tNL80211_RRF_PTP_ONLY | "
93 } else if (flagarray[arg] == "PTMP-ONLY") {
94 flags = flags "\n\t\t\tNL80211_RRF_PTMP_ONLY | "
95 } else if (flagarray[arg] == "PASSIVE-SCAN") {
96 flags = flags "\n\t\t\tNL80211_RRF_PASSIVE_SCAN | "
97 } else if (flagarray[arg] == "NO-IBSS") {
98 flags = flags "\n\t\t\tNL80211_RRF_NO_IBSS | "
99 }
100 }
101 flags = flags "0"
102 printf "\t\tREG_RULE(%d, %d, %d, %d, %d, %s),\n", start, end, bw, gain, power, flags
103 rules++
104}
105
106active && /^[ \t]*$/ {
107 active = 0
108 printf "\t},\n"
109 printf "\t.n_reg_rules = %d\n", rules
110 printf "};\n\n"
111 rules = 0;
112}
113
114END {
115 print regdb "};"
116 print ""
117 print "int reg_regdb_size = ARRAY_SIZE(reg_regdb);"
118}
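
For orientation, given a hypothetical db.txt country block such as "country US:" followed by the rule lines "(2402 - 2472 @ 40), (3, 27)" and "(5250 - 5330 @ 40), (3, 20), DFS" (the country code, frequencies, gains, powers and the DFS flag are invented here purely to illustrate the transformation; real rule sets come from the wireless-regdb repository referenced in db.txt above), the script would emit a regdb.c along these lines:

/*
 * DO NOT EDIT -- file generated from data in db.txt
 */

#include <linux/nl80211.h>
#include <net/cfg80211.h>

static const struct ieee80211_regdomain regdom_US = {
	.alpha2 = "US",
	.reg_rules = {
		REG_RULE(2402, 2472, 40, 3, 27, 0),
		REG_RULE(5250, 5330, 40, 3, 20,
			NL80211_RRF_DFS | 0),
	},
	.n_reg_rules = 2
};

const struct ieee80211_regdomain *reg_regdb[] = {
	&regdom_US,
};

int reg_regdb_size = ARRAY_SIZE(reg_regdb);

The reg_regdb[] array and reg_regdb_size are presumably what the reg.c changes further below (which now include regdb.h) walk when CONFIG_CFG80211_INTERNAL_REGDB is enabled.
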
diff --git a/net/wireless/mlme.c b/net/wireless/mlme.c
index 82e6002c8d6..94d151f6f73 100644
--- a/net/wireless/mlme.c
+++ b/net/wireless/mlme.c
@@ -148,22 +148,23 @@ void __cfg80211_send_deauth(struct net_device *dev,
148 struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)buf; 148 struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)buf;
149 const u8 *bssid = mgmt->bssid; 149 const u8 *bssid = mgmt->bssid;
150 int i; 150 int i;
151 bool found = false;
151 152
152 ASSERT_WDEV_LOCK(wdev); 153 ASSERT_WDEV_LOCK(wdev);
153 154
154 nl80211_send_deauth(rdev, dev, buf, len, GFP_KERNEL);
155
156 if (wdev->current_bss && 155 if (wdev->current_bss &&
157 memcmp(wdev->current_bss->pub.bssid, bssid, ETH_ALEN) == 0) { 156 memcmp(wdev->current_bss->pub.bssid, bssid, ETH_ALEN) == 0) {
158 cfg80211_unhold_bss(wdev->current_bss); 157 cfg80211_unhold_bss(wdev->current_bss);
159 cfg80211_put_bss(&wdev->current_bss->pub); 158 cfg80211_put_bss(&wdev->current_bss->pub);
160 wdev->current_bss = NULL; 159 wdev->current_bss = NULL;
160 found = true;
161 } else for (i = 0; i < MAX_AUTH_BSSES; i++) { 161 } else for (i = 0; i < MAX_AUTH_BSSES; i++) {
162 if (wdev->auth_bsses[i] && 162 if (wdev->auth_bsses[i] &&
163 memcmp(wdev->auth_bsses[i]->pub.bssid, bssid, ETH_ALEN) == 0) { 163 memcmp(wdev->auth_bsses[i]->pub.bssid, bssid, ETH_ALEN) == 0) {
164 cfg80211_unhold_bss(wdev->auth_bsses[i]); 164 cfg80211_unhold_bss(wdev->auth_bsses[i]);
165 cfg80211_put_bss(&wdev->auth_bsses[i]->pub); 165 cfg80211_put_bss(&wdev->auth_bsses[i]->pub);
166 wdev->auth_bsses[i] = NULL; 166 wdev->auth_bsses[i] = NULL;
167 found = true;
167 break; 168 break;
168 } 169 }
169 if (wdev->authtry_bsses[i] && 170 if (wdev->authtry_bsses[i] &&
@@ -171,10 +172,16 @@ void __cfg80211_send_deauth(struct net_device *dev,
171 cfg80211_unhold_bss(wdev->authtry_bsses[i]); 172 cfg80211_unhold_bss(wdev->authtry_bsses[i]);
172 cfg80211_put_bss(&wdev->authtry_bsses[i]->pub); 173 cfg80211_put_bss(&wdev->authtry_bsses[i]->pub);
173 wdev->authtry_bsses[i] = NULL; 174 wdev->authtry_bsses[i] = NULL;
175 found = true;
174 break; 176 break;
175 } 177 }
176 } 178 }
177 179
180 if (!found)
181 return;
182
183 nl80211_send_deauth(rdev, dev, buf, len, GFP_KERNEL);
184
178 if (wdev->sme_state == CFG80211_SME_CONNECTED) { 185 if (wdev->sme_state == CFG80211_SME_CONNECTED) {
179 u16 reason_code; 186 u16 reason_code;
180 bool from_ap; 187 bool from_ap;
@@ -684,3 +691,40 @@ void cfg80211_mlme_down(struct cfg80211_registered_device *rdev,
684 } 691 }
685 } 692 }
686} 693}
694
695void cfg80211_ready_on_channel(struct net_device *dev, u64 cookie,
696 struct ieee80211_channel *chan,
697 enum nl80211_channel_type channel_type,
698 unsigned int duration, gfp_t gfp)
699{
700 struct wiphy *wiphy = dev->ieee80211_ptr->wiphy;
701 struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
702
703 nl80211_send_remain_on_channel(rdev, dev, cookie, chan, channel_type,
704 duration, gfp);
705}
706EXPORT_SYMBOL(cfg80211_ready_on_channel);
707
708void cfg80211_remain_on_channel_expired(struct net_device *dev,
709 u64 cookie,
710 struct ieee80211_channel *chan,
711 enum nl80211_channel_type channel_type,
712 gfp_t gfp)
713{
714 struct wiphy *wiphy = dev->ieee80211_ptr->wiphy;
715 struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
716
717 nl80211_send_remain_on_channel_cancel(rdev, dev, cookie, chan,
718 channel_type, gfp);
719}
720EXPORT_SYMBOL(cfg80211_remain_on_channel_expired);
721
722void cfg80211_new_sta(struct net_device *dev, const u8 *mac_addr,
723 struct station_info *sinfo, gfp_t gfp)
724{
725 struct wiphy *wiphy = dev->ieee80211_ptr->wiphy;
726 struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
727
728 nl80211_send_sta_event(rdev, dev, mac_addr, sinfo, gfp);
729}
730EXPORT_SYMBOL(cfg80211_new_sta);
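
As a quick, non-authoritative sketch of how a driver might use the two remain-on-channel notifiers exported above: the cfg80211_ready_on_channel() and cfg80211_remain_on_channel_expired() calls below follow the signatures in this hunk, while struct roc_request and the drv_roc_*() helpers are invented for illustration only.

#include <net/cfg80211.h>

/* Hypothetical driver bookkeeping for one off-channel request. */
struct roc_request {
	struct net_device *ndev;
	struct ieee80211_channel *chan;
	enum nl80211_channel_type chan_type;
	unsigned int duration_ms;
	u64 cookie;	/* value handed back to cfg80211/userspace */
};

/* Hardware has actually gone off-channel: tell cfg80211, which in turn
 * multicasts NL80211_CMD_REMAIN_ON_CHANNEL (with the cookie) via the
 * nl80211 helpers added below. */
static void drv_roc_started(struct roc_request *req)
{
	cfg80211_ready_on_channel(req->ndev, req->cookie, req->chan,
				  req->chan_type, req->duration_ms,
				  GFP_KERNEL);
}

/* The requested duration has elapsed (or the driver gave up):
 * userspace gets NL80211_CMD_CANCEL_REMAIN_ON_CHANNEL. */
static void drv_roc_finished(struct roc_request *req)
{
	cfg80211_remain_on_channel_expired(req->ndev, req->cookie,
					   req->chan, req->chan_type,
					   GFP_KERNEL);
}
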
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index a6028433e3a..4af7991a9ec 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -69,6 +69,7 @@ static struct nla_policy nl80211_policy[NL80211_ATTR_MAX+1] __read_mostly = {
69 [NL80211_ATTR_WIPHY_RETRY_LONG] = { .type = NLA_U8 }, 69 [NL80211_ATTR_WIPHY_RETRY_LONG] = { .type = NLA_U8 },
70 [NL80211_ATTR_WIPHY_FRAG_THRESHOLD] = { .type = NLA_U32 }, 70 [NL80211_ATTR_WIPHY_FRAG_THRESHOLD] = { .type = NLA_U32 },
71 [NL80211_ATTR_WIPHY_RTS_THRESHOLD] = { .type = NLA_U32 }, 71 [NL80211_ATTR_WIPHY_RTS_THRESHOLD] = { .type = NLA_U32 },
72 [NL80211_ATTR_WIPHY_COVERAGE_CLASS] = { .type = NLA_U8 },
72 73
73 [NL80211_ATTR_IFTYPE] = { .type = NLA_U32 }, 74 [NL80211_ATTR_IFTYPE] = { .type = NLA_U32 },
74 [NL80211_ATTR_IFINDEX] = { .type = NLA_U32 }, 75 [NL80211_ATTR_IFINDEX] = { .type = NLA_U32 },
@@ -141,6 +142,9 @@ static struct nla_policy nl80211_policy[NL80211_ATTR_MAX+1] __read_mostly = {
141 [NL80211_ATTR_4ADDR] = { .type = NLA_U8 }, 142 [NL80211_ATTR_4ADDR] = { .type = NLA_U8 },
142 [NL80211_ATTR_PMKID] = { .type = NLA_BINARY, 143 [NL80211_ATTR_PMKID] = { .type = NLA_BINARY,
143 .len = WLAN_PMKID_LEN }, 144 .len = WLAN_PMKID_LEN },
145 [NL80211_ATTR_DURATION] = { .type = NLA_U32 },
146 [NL80211_ATTR_COOKIE] = { .type = NLA_U64 },
147 [NL80211_ATTR_TX_RATES] = { .type = NLA_NESTED },
144}; 148};
145 149
146/* policy for the attributes */ 150/* policy for the attributes */
@@ -442,6 +446,8 @@ static int nl80211_send_wiphy(struct sk_buff *msg, u32 pid, u32 seq, int flags,
442 dev->wiphy.frag_threshold); 446 dev->wiphy.frag_threshold);
443 NLA_PUT_U32(msg, NL80211_ATTR_WIPHY_RTS_THRESHOLD, 447 NLA_PUT_U32(msg, NL80211_ATTR_WIPHY_RTS_THRESHOLD,
444 dev->wiphy.rts_threshold); 448 dev->wiphy.rts_threshold);
449 NLA_PUT_U8(msg, NL80211_ATTR_WIPHY_COVERAGE_CLASS,
450 dev->wiphy.coverage_class);
445 451
446 NLA_PUT_U8(msg, NL80211_ATTR_MAX_NUM_SCAN_SSIDS, 452 NLA_PUT_U8(msg, NL80211_ATTR_MAX_NUM_SCAN_SSIDS,
447 dev->wiphy.max_scan_ssids); 453 dev->wiphy.max_scan_ssids);
@@ -569,6 +575,8 @@ static int nl80211_send_wiphy(struct sk_buff *msg, u32 pid, u32 seq, int flags,
569 CMD(set_pmksa, SET_PMKSA); 575 CMD(set_pmksa, SET_PMKSA);
570 CMD(del_pmksa, DEL_PMKSA); 576 CMD(del_pmksa, DEL_PMKSA);
571 CMD(flush_pmksa, FLUSH_PMKSA); 577 CMD(flush_pmksa, FLUSH_PMKSA);
578 CMD(remain_on_channel, REMAIN_ON_CHANNEL);
579 CMD(set_bitrate_mask, SET_TX_BITRATE_MASK);
572 if (dev->wiphy.flags & WIPHY_FLAG_NETNS_OK) { 580 if (dev->wiphy.flags & WIPHY_FLAG_NETNS_OK) {
573 i++; 581 i++;
574 NLA_PUT_U32(msg, i, NL80211_CMD_SET_WIPHY_NETNS); 582 NLA_PUT_U32(msg, i, NL80211_CMD_SET_WIPHY_NETNS);
@@ -681,6 +689,7 @@ static int nl80211_set_wiphy(struct sk_buff *skb, struct genl_info *info)
681 u32 changed; 689 u32 changed;
682 u8 retry_short = 0, retry_long = 0; 690 u8 retry_short = 0, retry_long = 0;
683 u32 frag_threshold = 0, rts_threshold = 0; 691 u32 frag_threshold = 0, rts_threshold = 0;
692 u8 coverage_class = 0;
684 693
685 rtnl_lock(); 694 rtnl_lock();
686 695
@@ -803,9 +812,16 @@ static int nl80211_set_wiphy(struct sk_buff *skb, struct genl_info *info)
803 changed |= WIPHY_PARAM_RTS_THRESHOLD; 812 changed |= WIPHY_PARAM_RTS_THRESHOLD;
804 } 813 }
805 814
815 if (info->attrs[NL80211_ATTR_WIPHY_COVERAGE_CLASS]) {
816 coverage_class = nla_get_u8(
817 info->attrs[NL80211_ATTR_WIPHY_COVERAGE_CLASS]);
818 changed |= WIPHY_PARAM_COVERAGE_CLASS;
819 }
820
806 if (changed) { 821 if (changed) {
807 u8 old_retry_short, old_retry_long; 822 u8 old_retry_short, old_retry_long;
808 u32 old_frag_threshold, old_rts_threshold; 823 u32 old_frag_threshold, old_rts_threshold;
824 u8 old_coverage_class;
809 825
810 if (!rdev->ops->set_wiphy_params) { 826 if (!rdev->ops->set_wiphy_params) {
811 result = -EOPNOTSUPP; 827 result = -EOPNOTSUPP;
@@ -816,6 +832,7 @@ static int nl80211_set_wiphy(struct sk_buff *skb, struct genl_info *info)
816 old_retry_long = rdev->wiphy.retry_long; 832 old_retry_long = rdev->wiphy.retry_long;
817 old_frag_threshold = rdev->wiphy.frag_threshold; 833 old_frag_threshold = rdev->wiphy.frag_threshold;
818 old_rts_threshold = rdev->wiphy.rts_threshold; 834 old_rts_threshold = rdev->wiphy.rts_threshold;
835 old_coverage_class = rdev->wiphy.coverage_class;
819 836
820 if (changed & WIPHY_PARAM_RETRY_SHORT) 837 if (changed & WIPHY_PARAM_RETRY_SHORT)
821 rdev->wiphy.retry_short = retry_short; 838 rdev->wiphy.retry_short = retry_short;
@@ -825,6 +842,8 @@ static int nl80211_set_wiphy(struct sk_buff *skb, struct genl_info *info)
825 rdev->wiphy.frag_threshold = frag_threshold; 842 rdev->wiphy.frag_threshold = frag_threshold;
826 if (changed & WIPHY_PARAM_RTS_THRESHOLD) 843 if (changed & WIPHY_PARAM_RTS_THRESHOLD)
827 rdev->wiphy.rts_threshold = rts_threshold; 844 rdev->wiphy.rts_threshold = rts_threshold;
845 if (changed & WIPHY_PARAM_COVERAGE_CLASS)
846 rdev->wiphy.coverage_class = coverage_class;
828 847
829 result = rdev->ops->set_wiphy_params(&rdev->wiphy, changed); 848 result = rdev->ops->set_wiphy_params(&rdev->wiphy, changed);
830 if (result) { 849 if (result) {
@@ -832,6 +851,7 @@ static int nl80211_set_wiphy(struct sk_buff *skb, struct genl_info *info)
832 rdev->wiphy.retry_long = old_retry_long; 851 rdev->wiphy.retry_long = old_retry_long;
833 rdev->wiphy.frag_threshold = old_frag_threshold; 852 rdev->wiphy.frag_threshold = old_frag_threshold;
834 rdev->wiphy.rts_threshold = old_rts_threshold; 853 rdev->wiphy.rts_threshold = old_rts_threshold;
854 rdev->wiphy.coverage_class = old_coverage_class;
835 } 855 }
836 } 856 }
837 857
@@ -1637,42 +1657,9 @@ static int parse_station_flags(struct genl_info *info,
1637 return 0; 1657 return 0;
1638} 1658}
1639 1659
1640static u16 nl80211_calculate_bitrate(struct rate_info *rate)
1641{
1642 int modulation, streams, bitrate;
1643
1644 if (!(rate->flags & RATE_INFO_FLAGS_MCS))
1645 return rate->legacy;
1646
1647 /* the formula below does only work for MCS values smaller than 32 */
1648 if (rate->mcs >= 32)
1649 return 0;
1650
1651 modulation = rate->mcs & 7;
1652 streams = (rate->mcs >> 3) + 1;
1653
1654 bitrate = (rate->flags & RATE_INFO_FLAGS_40_MHZ_WIDTH) ?
1655 13500000 : 6500000;
1656
1657 if (modulation < 4)
1658 bitrate *= (modulation + 1);
1659 else if (modulation == 4)
1660 bitrate *= (modulation + 2);
1661 else
1662 bitrate *= (modulation + 3);
1663
1664 bitrate *= streams;
1665
1666 if (rate->flags & RATE_INFO_FLAGS_SHORT_GI)
1667 bitrate = (bitrate / 9) * 10;
1668
1669 /* do NOT round down here */
1670 return (bitrate + 50000) / 100000;
1671}
1672
1673static int nl80211_send_station(struct sk_buff *msg, u32 pid, u32 seq, 1660static int nl80211_send_station(struct sk_buff *msg, u32 pid, u32 seq,
1674 int flags, struct net_device *dev, 1661 int flags, struct net_device *dev,
1675 u8 *mac_addr, struct station_info *sinfo) 1662 const u8 *mac_addr, struct station_info *sinfo)
1676{ 1663{
1677 void *hdr; 1664 void *hdr;
1678 struct nlattr *sinfoattr, *txrate; 1665 struct nlattr *sinfoattr, *txrate;
@@ -1716,8 +1703,8 @@ static int nl80211_send_station(struct sk_buff *msg, u32 pid, u32 seq,
1716 if (!txrate) 1703 if (!txrate)
1717 goto nla_put_failure; 1704 goto nla_put_failure;
1718 1705
1719 /* nl80211_calculate_bitrate will return 0 for mcs >= 32 */ 1706 /* cfg80211_calculate_bitrate will return 0 for mcs >= 32 */
1720 bitrate = nl80211_calculate_bitrate(&sinfo->txrate); 1707 bitrate = cfg80211_calculate_bitrate(&sinfo->txrate);
1721 if (bitrate > 0) 1708 if (bitrate > 0)
1722 NLA_PUT_U16(msg, NL80211_RATE_INFO_BITRATE, bitrate); 1709 NLA_PUT_U16(msg, NL80211_RATE_INFO_BITRATE, bitrate);
1723 1710
@@ -2583,12 +2570,6 @@ static int nl80211_req_set_reg(struct sk_buff *skb, struct genl_info *info)
2583 2570
2584 data = nla_data(info->attrs[NL80211_ATTR_REG_ALPHA2]); 2571 data = nla_data(info->attrs[NL80211_ATTR_REG_ALPHA2]);
2585 2572
2586#ifdef CONFIG_WIRELESS_OLD_REGULATORY
2587 /* We ignore world regdom requests with the old regdom setup */
2588 if (is_world_regdom(data))
2589 return -EINVAL;
2590#endif
2591
2592 r = regulatory_hint_user(data); 2573 r = regulatory_hint_user(data);
2593 2574
2594 return r; 2575 return r;
@@ -3182,6 +3163,10 @@ static int nl80211_send_bss(struct sk_buff *msg, u32 pid, u32 seq, int flags,
3182 NLA_PUT(msg, NL80211_BSS_INFORMATION_ELEMENTS, 3163 NLA_PUT(msg, NL80211_BSS_INFORMATION_ELEMENTS,
3183 res->len_information_elements, 3164 res->len_information_elements,
3184 res->information_elements); 3165 res->information_elements);
3166 if (res->beacon_ies && res->len_beacon_ies &&
3167 res->beacon_ies != res->information_elements)
3168 NLA_PUT(msg, NL80211_BSS_BEACON_IES,
3169 res->len_beacon_ies, res->beacon_ies);
3185 if (res->tsf) 3170 if (res->tsf)
3186 NLA_PUT_U64(msg, NL80211_BSS_TSF, res->tsf); 3171 NLA_PUT_U64(msg, NL80211_BSS_TSF, res->tsf);
3187 if (res->beacon_interval) 3172 if (res->beacon_interval)
@@ -4322,6 +4307,246 @@ static int nl80211_flush_pmksa(struct sk_buff *skb, struct genl_info *info)
4322 4307
4323} 4308}
4324 4309
4310static int nl80211_remain_on_channel(struct sk_buff *skb,
4311 struct genl_info *info)
4312{
4313 struct cfg80211_registered_device *rdev;
4314 struct net_device *dev;
4315 struct ieee80211_channel *chan;
4316 struct sk_buff *msg;
4317 void *hdr;
4318 u64 cookie;
4319 enum nl80211_channel_type channel_type = NL80211_CHAN_NO_HT;
4320 u32 freq, duration;
4321 int err;
4322
4323 if (!info->attrs[NL80211_ATTR_WIPHY_FREQ] ||
4324 !info->attrs[NL80211_ATTR_DURATION])
4325 return -EINVAL;
4326
4327 duration = nla_get_u32(info->attrs[NL80211_ATTR_DURATION]);
4328
4329 /*
4330 * We should be on that channel for at least one jiffie,
4331 * and more than 5 seconds seems excessive.
4332 */
4333 if (!duration || !msecs_to_jiffies(duration) || duration > 5000)
4334 return -EINVAL;
4335
4336 rtnl_lock();
4337
4338 err = get_rdev_dev_by_info_ifindex(info, &rdev, &dev);
4339 if (err)
4340 goto unlock_rtnl;
4341
4342 if (!rdev->ops->remain_on_channel) {
4343 err = -EOPNOTSUPP;
4344 goto out;
4345 }
4346
4347 if (!netif_running(dev)) {
4348 err = -ENETDOWN;
4349 goto out;
4350 }
4351
4352 if (info->attrs[NL80211_ATTR_WIPHY_CHANNEL_TYPE]) {
4353 channel_type = nla_get_u32(
4354 info->attrs[NL80211_ATTR_WIPHY_CHANNEL_TYPE]);
4355 if (channel_type != NL80211_CHAN_NO_HT &&
4356 channel_type != NL80211_CHAN_HT20 &&
4357 channel_type != NL80211_CHAN_HT40PLUS &&
4358 channel_type != NL80211_CHAN_HT40MINUS)
4359 err = -EINVAL;
4360 goto out;
4361 }
4362
4363 freq = nla_get_u32(info->attrs[NL80211_ATTR_WIPHY_FREQ]);
4364 chan = rdev_freq_to_chan(rdev, freq, channel_type);
4365 if (chan == NULL) {
4366 err = -EINVAL;
4367 goto out;
4368 }
4369
4370 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
4371 if (!msg) {
4372 err = -ENOMEM;
4373 goto out;
4374 }
4375
4376 hdr = nl80211hdr_put(msg, info->snd_pid, info->snd_seq, 0,
4377 NL80211_CMD_REMAIN_ON_CHANNEL);
4378
4379 if (IS_ERR(hdr)) {
4380 err = PTR_ERR(hdr);
4381 goto free_msg;
4382 }
4383
4384 err = rdev->ops->remain_on_channel(&rdev->wiphy, dev, chan,
4385 channel_type, duration, &cookie);
4386
4387 if (err)
4388 goto free_msg;
4389
4390 NLA_PUT_U64(msg, NL80211_ATTR_COOKIE, cookie);
4391
4392 genlmsg_end(msg, hdr);
4393 err = genlmsg_reply(msg, info);
4394 goto out;
4395
4396 nla_put_failure:
4397 err = -ENOBUFS;
4398 free_msg:
4399 nlmsg_free(msg);
4400 out:
4401 cfg80211_unlock_rdev(rdev);
4402 dev_put(dev);
4403 unlock_rtnl:
4404 rtnl_unlock();
4405 return err;
4406}
4407
4408static int nl80211_cancel_remain_on_channel(struct sk_buff *skb,
4409 struct genl_info *info)
4410{
4411 struct cfg80211_registered_device *rdev;
4412 struct net_device *dev;
4413 u64 cookie;
4414 int err;
4415
4416 if (!info->attrs[NL80211_ATTR_COOKIE])
4417 return -EINVAL;
4418
4419 rtnl_lock();
4420
4421 err = get_rdev_dev_by_info_ifindex(info, &rdev, &dev);
4422 if (err)
4423 goto unlock_rtnl;
4424
4425 if (!rdev->ops->cancel_remain_on_channel) {
4426 err = -EOPNOTSUPP;
4427 goto out;
4428 }
4429
4430 if (!netif_running(dev)) {
4431 err = -ENETDOWN;
4432 goto out;
4433 }
4434
4435 cookie = nla_get_u64(info->attrs[NL80211_ATTR_COOKIE]);
4436
4437 err = rdev->ops->cancel_remain_on_channel(&rdev->wiphy, dev, cookie);
4438
4439 out:
4440 cfg80211_unlock_rdev(rdev);
4441 dev_put(dev);
4442 unlock_rtnl:
4443 rtnl_unlock();
4444 return err;
4445}
4446
4447static u32 rateset_to_mask(struct ieee80211_supported_band *sband,
4448 u8 *rates, u8 rates_len)
4449{
4450 u8 i;
4451 u32 mask = 0;
4452
4453 for (i = 0; i < rates_len; i++) {
4454 int rate = (rates[i] & 0x7f) * 5;
4455 int ridx;
4456 for (ridx = 0; ridx < sband->n_bitrates; ridx++) {
4457 struct ieee80211_rate *srate =
4458 &sband->bitrates[ridx];
4459 if (rate == srate->bitrate) {
4460 mask |= 1 << ridx;
4461 break;
4462 }
4463 }
4464 if (ridx == sband->n_bitrates)
4465 return 0; /* rate not found */
4466 }
4467
4468 return mask;
4469}
4470
4471static struct nla_policy
4472nl80211_txattr_policy[NL80211_TXRATE_MAX + 1] __read_mostly = {
4473 [NL80211_TXRATE_LEGACY] = { .type = NLA_BINARY,
4474 .len = NL80211_MAX_SUPP_RATES },
4475};
4476
4477static int nl80211_set_tx_bitrate_mask(struct sk_buff *skb,
4478 struct genl_info *info)
4479{
4480 struct nlattr *tb[NL80211_TXRATE_MAX + 1];
4481 struct cfg80211_registered_device *rdev;
4482 struct cfg80211_bitrate_mask mask;
4483 int err, rem, i;
4484 struct net_device *dev;
4485 struct nlattr *tx_rates;
4486 struct ieee80211_supported_band *sband;
4487
4488 if (info->attrs[NL80211_ATTR_TX_RATES] == NULL)
4489 return -EINVAL;
4490
4491 rtnl_lock();
4492
4493 err = get_rdev_dev_by_info_ifindex(info, &rdev, &dev);
4494 if (err)
4495 goto unlock_rtnl;
4496
4497 if (!rdev->ops->set_bitrate_mask) {
4498 err = -EOPNOTSUPP;
4499 goto unlock;
4500 }
4501
4502 memset(&mask, 0, sizeof(mask));
4503 /* Default to all rates enabled */
4504 for (i = 0; i < IEEE80211_NUM_BANDS; i++) {
4505 sband = rdev->wiphy.bands[i];
4506 mask.control[i].legacy =
4507 sband ? (1 << sband->n_bitrates) - 1 : 0;
4508 }
4509
4510 /*
4511 * The nested attribute uses enum nl80211_band as the index. This maps
4512 * directly to the enum ieee80211_band values used in cfg80211.
4513 */
4514 nla_for_each_nested(tx_rates, info->attrs[NL80211_ATTR_TX_RATES], rem)
4515 {
4516 enum ieee80211_band band = nla_type(tx_rates);
4517 if (band < 0 || band >= IEEE80211_NUM_BANDS) {
4518 err = -EINVAL;
4519 goto unlock;
4520 }
4521 sband = rdev->wiphy.bands[band];
4522 if (sband == NULL) {
4523 err = -EINVAL;
4524 goto unlock;
4525 }
4526 nla_parse(tb, NL80211_TXRATE_MAX, nla_data(tx_rates),
4527 nla_len(tx_rates), nl80211_txattr_policy);
4528 if (tb[NL80211_TXRATE_LEGACY]) {
4529 mask.control[band].legacy = rateset_to_mask(
4530 sband,
4531 nla_data(tb[NL80211_TXRATE_LEGACY]),
4532 nla_len(tb[NL80211_TXRATE_LEGACY]));
4533 if (mask.control[band].legacy == 0) {
4534 err = -EINVAL;
4535 goto unlock;
4536 }
4537 }
4538 }
4539
4540 err = rdev->ops->set_bitrate_mask(&rdev->wiphy, dev, NULL, &mask);
4541
4542 unlock:
4543 dev_put(dev);
4544 cfg80211_unlock_rdev(rdev);
4545 unlock_rtnl:
4546 rtnl_unlock();
4547 return err;
4548}
4549
4325static struct genl_ops nl80211_ops[] = { 4550static struct genl_ops nl80211_ops[] = {
4326 { 4551 {
4327 .cmd = NL80211_CMD_GET_WIPHY, 4552 .cmd = NL80211_CMD_GET_WIPHY,
@@ -4584,8 +4809,26 @@ static struct genl_ops nl80211_ops[] = {
4584 .policy = nl80211_policy, 4809 .policy = nl80211_policy,
4585 .flags = GENL_ADMIN_PERM, 4810 .flags = GENL_ADMIN_PERM,
4586 }, 4811 },
4587 4812 {
4813 .cmd = NL80211_CMD_REMAIN_ON_CHANNEL,
4814 .doit = nl80211_remain_on_channel,
4815 .policy = nl80211_policy,
4816 .flags = GENL_ADMIN_PERM,
4817 },
4818 {
4819 .cmd = NL80211_CMD_CANCEL_REMAIN_ON_CHANNEL,
4820 .doit = nl80211_cancel_remain_on_channel,
4821 .policy = nl80211_policy,
4822 .flags = GENL_ADMIN_PERM,
4823 },
4824 {
4825 .cmd = NL80211_CMD_SET_TX_BITRATE_MASK,
4826 .doit = nl80211_set_tx_bitrate_mask,
4827 .policy = nl80211_policy,
4828 .flags = GENL_ADMIN_PERM,
4829 },
4588}; 4830};
4831
4589static struct genl_multicast_group nl80211_mlme_mcgrp = { 4832static struct genl_multicast_group nl80211_mlme_mcgrp = {
4590 .name = "mlme", 4833 .name = "mlme",
4591}; 4834};
@@ -5173,6 +5416,89 @@ nla_put_failure:
5173 nlmsg_free(msg); 5416 nlmsg_free(msg);
5174} 5417}
5175 5418
5419static void nl80211_send_remain_on_chan_event(
5420 int cmd, struct cfg80211_registered_device *rdev,
5421 struct net_device *netdev, u64 cookie,
5422 struct ieee80211_channel *chan,
5423 enum nl80211_channel_type channel_type,
5424 unsigned int duration, gfp_t gfp)
5425{
5426 struct sk_buff *msg;
5427 void *hdr;
5428
5429 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, gfp);
5430 if (!msg)
5431 return;
5432
5433 hdr = nl80211hdr_put(msg, 0, 0, 0, cmd);
5434 if (!hdr) {
5435 nlmsg_free(msg);
5436 return;
5437 }
5438
5439 NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx);
5440 NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex);
5441 NLA_PUT_U32(msg, NL80211_ATTR_WIPHY_FREQ, chan->center_freq);
5442 NLA_PUT_U32(msg, NL80211_ATTR_WIPHY_CHANNEL_TYPE, channel_type);
5443 NLA_PUT_U64(msg, NL80211_ATTR_COOKIE, cookie);
5444
5445 if (cmd == NL80211_CMD_REMAIN_ON_CHANNEL)
5446 NLA_PUT_U32(msg, NL80211_ATTR_DURATION, duration);
5447
5448 if (genlmsg_end(msg, hdr) < 0) {
5449 nlmsg_free(msg);
5450 return;
5451 }
5452
5453 genlmsg_multicast_netns(wiphy_net(&rdev->wiphy), msg, 0,
5454 nl80211_mlme_mcgrp.id, gfp);
5455 return;
5456
5457 nla_put_failure:
5458 genlmsg_cancel(msg, hdr);
5459 nlmsg_free(msg);
5460}
5461
5462void nl80211_send_remain_on_channel(struct cfg80211_registered_device *rdev,
5463 struct net_device *netdev, u64 cookie,
5464 struct ieee80211_channel *chan,
5465 enum nl80211_channel_type channel_type,
5466 unsigned int duration, gfp_t gfp)
5467{
5468 nl80211_send_remain_on_chan_event(NL80211_CMD_REMAIN_ON_CHANNEL,
5469 rdev, netdev, cookie, chan,
5470 channel_type, duration, gfp);
5471}
5472
5473void nl80211_send_remain_on_channel_cancel(
5474 struct cfg80211_registered_device *rdev, struct net_device *netdev,
5475 u64 cookie, struct ieee80211_channel *chan,
5476 enum nl80211_channel_type channel_type, gfp_t gfp)
5477{
5478 nl80211_send_remain_on_chan_event(NL80211_CMD_CANCEL_REMAIN_ON_CHANNEL,
5479 rdev, netdev, cookie, chan,
5480 channel_type, 0, gfp);
5481}
5482
5483void nl80211_send_sta_event(struct cfg80211_registered_device *rdev,
5484 struct net_device *dev, const u8 *mac_addr,
5485 struct station_info *sinfo, gfp_t gfp)
5486{
5487 struct sk_buff *msg;
5488
5489 msg = nlmsg_new(NLMSG_GOODSIZE, gfp);
5490 if (!msg)
5491 return;
5492
5493 if (nl80211_send_station(msg, 0, 0, 0, dev, mac_addr, sinfo) < 0) {
5494 nlmsg_free(msg);
5495 return;
5496 }
5497
5498 genlmsg_multicast_netns(wiphy_net(&rdev->wiphy), msg, 0,
5499 nl80211_mlme_mcgrp.id, gfp);
5500}
5501
5176/* initialisation/exit functions */ 5502/* initialisation/exit functions */
5177 5503
5178int nl80211_init(void) 5504int nl80211_init(void)
diff --git a/net/wireless/nl80211.h b/net/wireless/nl80211.h
index 44cc2a76a1b..14855b8fb43 100644
--- a/net/wireless/nl80211.h
+++ b/net/wireless/nl80211.h
@@ -59,4 +59,19 @@ void nl80211_send_ibss_bssid(struct cfg80211_registered_device *rdev,
59 struct net_device *netdev, const u8 *bssid, 59 struct net_device *netdev, const u8 *bssid,
60 gfp_t gfp); 60 gfp_t gfp);
61 61
62void nl80211_send_remain_on_channel(struct cfg80211_registered_device *rdev,
63 struct net_device *netdev,
64 u64 cookie,
65 struct ieee80211_channel *chan,
66 enum nl80211_channel_type channel_type,
67 unsigned int duration, gfp_t gfp);
68void nl80211_send_remain_on_channel_cancel(
69 struct cfg80211_registered_device *rdev, struct net_device *netdev,
70 u64 cookie, struct ieee80211_channel *chan,
71 enum nl80211_channel_type channel_type, gfp_t gfp);
72
73void nl80211_send_sta_event(struct cfg80211_registered_device *rdev,
74 struct net_device *dev, const u8 *mac_addr,
75 struct station_info *sinfo, gfp_t gfp);
76
62#endif /* __NET_WIRELESS_NL80211_H */ 77#endif /* __NET_WIRELESS_NL80211_H */
diff --git a/net/wireless/reg.c b/net/wireless/reg.c
index 7a0754c92df..5f8071de795 100644
--- a/net/wireless/reg.c
+++ b/net/wireless/reg.c
@@ -40,8 +40,18 @@
40#include <net/cfg80211.h> 40#include <net/cfg80211.h>
41#include "core.h" 41#include "core.h"
42#include "reg.h" 42#include "reg.h"
43#include "regdb.h"
43#include "nl80211.h" 44#include "nl80211.h"
44 45
46#ifdef CONFIG_CFG80211_REG_DEBUG
47#define REG_DBG_PRINT(format, args...) \
48 do { \
49 printk(KERN_DEBUG format , ## args); \
50 } while (0)
51#else
52#define REG_DBG_PRINT(args...)
53#endif
54
45/* Receipt of information from last regulatory request */ 55/* Receipt of information from last regulatory request */
46static struct regulatory_request *last_request; 56static struct regulatory_request *last_request;
47 57
@@ -128,78 +138,6 @@ static char *ieee80211_regdom = "00";
128module_param(ieee80211_regdom, charp, 0444); 138module_param(ieee80211_regdom, charp, 0444);
129MODULE_PARM_DESC(ieee80211_regdom, "IEEE 802.11 regulatory domain code"); 139MODULE_PARM_DESC(ieee80211_regdom, "IEEE 802.11 regulatory domain code");
130 140
131#ifdef CONFIG_WIRELESS_OLD_REGULATORY
132/*
133 * We assume 40 MHz bandwidth for the old regulatory work.
134 * We make emphasis we are using the exact same frequencies
135 * as before
136 */
137
138static const struct ieee80211_regdomain us_regdom = {
139 .n_reg_rules = 6,
140 .alpha2 = "US",
141 .reg_rules = {
142 /* IEEE 802.11b/g, channels 1..11 */
143 REG_RULE(2412-10, 2462+10, 40, 6, 27, 0),
144 /* IEEE 802.11a, channel 36..48 */
145 REG_RULE(5180-10, 5240+10, 40, 6, 17, 0),
146 /* IEEE 802.11a, channels 48..64 */
147 REG_RULE(5260-10, 5320+10, 40, 6, 20, NL80211_RRF_DFS),
148 /* IEEE 802.11a, channels 100..124 */
149 REG_RULE(5500-10, 5590+10, 40, 6, 20, NL80211_RRF_DFS),
150 /* IEEE 802.11a, channels 132..144 */
151 REG_RULE(5660-10, 5700+10, 40, 6, 20, NL80211_RRF_DFS),
152 /* IEEE 802.11a, channels 149..165, outdoor */
153 REG_RULE(5745-10, 5825+10, 40, 6, 30, 0),
154 }
155};
156
157static const struct ieee80211_regdomain jp_regdom = {
158 .n_reg_rules = 6,
159 .alpha2 = "JP",
160 .reg_rules = {
161 /* IEEE 802.11b/g, channels 1..11 */
162 REG_RULE(2412-10, 2462+10, 40, 6, 20, 0),
163 /* IEEE 802.11b/g, channels 12..13 */
164 REG_RULE(2467-10, 2472+10, 20, 6, 20, 0),
165 /* IEEE 802.11b/g, channel 14 */
166 REG_RULE(2484-10, 2484+10, 20, 6, 20, NL80211_RRF_NO_OFDM),
167 /* IEEE 802.11a, channels 36..48 */
168 REG_RULE(5180-10, 5240+10, 40, 6, 20, 0),
169 /* IEEE 802.11a, channels 52..64 */
170 REG_RULE(5260-10, 5320+10, 40, 6, 20, NL80211_RRF_DFS),
171 /* IEEE 802.11a, channels 100..144 */
172 REG_RULE(5500-10, 5700+10, 40, 6, 23, NL80211_RRF_DFS),
173 }
174};
175
176static const struct ieee80211_regdomain *static_regdom(char *alpha2)
177{
178 if (alpha2[0] == 'U' && alpha2[1] == 'S')
179 return &us_regdom;
180 if (alpha2[0] == 'J' && alpha2[1] == 'P')
181 return &jp_regdom;
182 /* Use world roaming rules for "EU", since it was a pseudo
183 domain anyway... */
184 if (alpha2[0] == 'E' && alpha2[1] == 'U')
185 return &world_regdom;
186 /* Default, world roaming rules */
187 return &world_regdom;
188}
189
190static bool is_old_static_regdom(const struct ieee80211_regdomain *rd)
191{
192 if (rd == &us_regdom || rd == &jp_regdom || rd == &world_regdom)
193 return true;
194 return false;
195}
196#else
197static inline bool is_old_static_regdom(const struct ieee80211_regdomain *rd)
198{
199 return false;
200}
201#endif
202
203static void reset_regdomains(void) 141static void reset_regdomains(void)
204{ 142{
205 /* avoid freeing static information or freeing something twice */ 143 /* avoid freeing static information or freeing something twice */
@@ -209,8 +147,6 @@ static void reset_regdomains(void)
209 cfg80211_world_regdom = NULL; 147 cfg80211_world_regdom = NULL;
210 if (cfg80211_regdomain == &world_regdom) 148 if (cfg80211_regdomain == &world_regdom)
211 cfg80211_regdomain = NULL; 149 cfg80211_regdomain = NULL;
212 if (is_old_static_regdom(cfg80211_regdomain))
213 cfg80211_regdomain = NULL;
214 150
215 kfree(cfg80211_regdomain); 151 kfree(cfg80211_regdomain);
216 kfree(cfg80211_world_regdom); 152 kfree(cfg80211_world_regdom);
@@ -335,6 +271,98 @@ static bool country_ie_integrity_changes(u32 checksum)
335 return false; 271 return false;
336} 272}
337 273
274static int reg_copy_regd(const struct ieee80211_regdomain **dst_regd,
275 const struct ieee80211_regdomain *src_regd)
276{
277 struct ieee80211_regdomain *regd;
278 int size_of_regd = 0;
279 unsigned int i;
280
281 size_of_regd = sizeof(struct ieee80211_regdomain) +
282 ((src_regd->n_reg_rules + 1) * sizeof(struct ieee80211_reg_rule));
283
284 regd = kzalloc(size_of_regd, GFP_KERNEL);
285 if (!regd)
286 return -ENOMEM;
287
288 memcpy(regd, src_regd, sizeof(struct ieee80211_regdomain));
289
290 for (i = 0; i < src_regd->n_reg_rules; i++)
291 memcpy(&regd->reg_rules[i], &src_regd->reg_rules[i],
292 sizeof(struct ieee80211_reg_rule));
293
294 *dst_regd = regd;
295 return 0;
296}
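Editorial usage note: reg_copy_regd() returns a heap duplicate sized for the flexible reg_rules[] array, and ownership passes to the caller. A hedged sketch of a call site:

static int reg_copy_regd_example(void)
{
	const struct ieee80211_regdomain *copy;
	int err;

	err = reg_copy_regd(&copy, cfg80211_world_regdom);
	if (err)
		return err;

	/* ... use 'copy' ... */

	kfree(copy);		/* the duplicate is the caller's to free */
	return 0;
}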
297
298#ifdef CONFIG_CFG80211_INTERNAL_REGDB
299struct reg_regdb_search_request {
300 char alpha2[2];
301 struct list_head list;
302};
303
304static LIST_HEAD(reg_regdb_search_list);
305static DEFINE_SPINLOCK(reg_regdb_search_lock);
306
307static void reg_regdb_search(struct work_struct *work)
308{
309 struct reg_regdb_search_request *request;
310 const struct ieee80211_regdomain *curdom, *regdom;
311 int i, r;
312
313 spin_lock(&reg_regdb_search_lock);
314 while (!list_empty(&reg_regdb_search_list)) {
315 request = list_first_entry(&reg_regdb_search_list,
316 struct reg_regdb_search_request,
317 list);
318 list_del(&request->list);
319
320 for (i=0; i<reg_regdb_size; i++) {
321 curdom = reg_regdb[i];
322
323 if (!memcmp(request->alpha2, curdom->alpha2, 2)) {
324 r = reg_copy_regd(&regdom, curdom);
325 if (r)
326 break;
327 spin_unlock(&reg_regdb_search_lock);
328 mutex_lock(&cfg80211_mutex);
329 set_regdom(regdom);
330 mutex_unlock(&cfg80211_mutex);
331 spin_lock(&reg_regdb_search_lock);
332 break;
333 }
334 }
335
336 kfree(request);
337 }
338 spin_unlock(&reg_regdb_search_lock);
339}
340
341static DECLARE_WORK(reg_regdb_work, reg_regdb_search);
342
343static void reg_regdb_query(const char *alpha2)
344{
345 struct reg_regdb_search_request *request;
346
347 if (!alpha2)
348 return;
349
350 request = kzalloc(sizeof(struct reg_regdb_search_request), GFP_KERNEL);
351 if (!request)
352 return;
353
354 memcpy(request->alpha2, alpha2, 2);
355
356 spin_lock(&reg_regdb_search_lock);
357 list_add_tail(&request->list, &reg_regdb_search_list);
358 spin_unlock(&reg_regdb_search_lock);
359
360 schedule_work(&reg_regdb_work);
361}
362#else
363static inline void reg_regdb_query(const char *alpha2) {}
364#endif /* CONFIG_CFG80211_INTERNAL_REGDB */
365
338/* 366/*
339 * This lets us keep regulatory code which is updated on a regulatory 367 * This lets us keep regulatory code which is updated on a regulatory
340 * basis in userspace. 368 * basis in userspace.
@@ -354,6 +382,9 @@ static int call_crda(const char *alpha2)
354 printk(KERN_INFO "cfg80211: Calling CRDA to update world " 382 printk(KERN_INFO "cfg80211: Calling CRDA to update world "
355 "regulatory domain\n"); 383 "regulatory domain\n");
356 384
385 /* query internal regulatory database (if it exists) */
386 reg_regdb_query(alpha2);
387
357 country_env[8] = alpha2[0]; 388 country_env[8] = alpha2[0];
358 country_env[9] = alpha2[1]; 389 country_env[9] = alpha2[1];
359 390
@@ -454,12 +485,212 @@ static bool freq_in_rule_band(const struct ieee80211_freq_range *freq_range,
454} 485}
455 486
456/* 487/*
488 * This is a workaround for sanity-checking the result of
489 * ieee80211_channel_to_frequency(), which can currently return a
490 * 2 GHz frequency when in fact a 5 GHz channel was desired. For example,
491 * an AP may provide channel 8 in a country IE triplet sent on the
492 * 5 GHz band; that channel is meant to be channel 8 on 5 GHz, not a
493 * 2 GHz channel.
494 *
495 * This can be removed once ieee80211_channel_to_frequency() takes in a band.
496 */
497static bool chan_in_band(int chan, enum ieee80211_band band)
498{
499 int center_freq = ieee80211_channel_to_frequency(chan);
500
501 switch (band) {
502 case IEEE80211_BAND_2GHZ:
503 if (center_freq <= 2484)
504 return true;
505 return false;
506 case IEEE80211_BAND_5GHZ:
507 if (center_freq >= 5005)
508 return true;
509 return false;
510 default:
511 return false;
512 }
513}
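To make the ambiguity concrete (editorial note): ieee80211_channel_to_frequency(8) currently returns 2447 MHz, so without band information a triplet starting at channel 8 that was actually received on 5 GHz (where channel 8 sits at 5040 MHz) would be treated as a 2 GHz rule. A throwaway self-check shows what the helper filters out:

static void chan_in_band_selftest(void)
{
	/* channel 8 maps to 2447 MHz today, so only the 2 GHz band matches */
	WARN_ON(!chan_in_band(8, IEEE80211_BAND_2GHZ));
	WARN_ON(chan_in_band(8, IEEE80211_BAND_5GHZ));

	/* channel 36 maps to 5180 MHz, so only the 5 GHz band matches */
	WARN_ON(chan_in_band(36, IEEE80211_BAND_2GHZ));
	WARN_ON(!chan_in_band(36, IEEE80211_BAND_5GHZ));
}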
514
515/*
516 * Some APs may send a country IE triplet for each channel they
517 * support and while this is completely overkill and silly we still
518 * need to support it. We avoid making a single rule for each channel
519 * though and to help us with this we use this helper to find the
520 * actual subband end channel. These type of country IE triplet
521 * scenerios are handled then, all yielding two regulaotry rules from
522 * parsing a country IE:
523 *
524 * [1]
525 * [2]
526 * [36]
527 * [40]
528 *
529 * [1]
530 * [2-4]
531 * [5-12]
532 * [36]
533 * [40-44]
534 *
535 * [1-4]
536 * [5-7]
537 * [36-44]
538 * [48-64]
539 *
540 * [36-36]
541 * [40-40]
542 * [44-44]
543 * [48-48]
544 * [52-52]
545 * [56-56]
546 * [60-60]
547 * [64-64]
548 * [100-100]
549 * [104-104]
550 * [108-108]
551 * [112-112]
552 * [116-116]
553 * [120-120]
554 * [124-124]
555 * [128-128]
556 * [132-132]
557 * [136-136]
558 * [140-140]
559 *
560 * Returns 0 if the IE is found to be invalid partway
561 * through.
562 */
563static int max_subband_chan(enum ieee80211_band band,
564 int orig_cur_chan,
565 int orig_end_channel,
566 s8 orig_max_power,
567 u8 **country_ie,
568 u8 *country_ie_len)
569{
570 u8 *triplets_start = *country_ie;
571 u8 len_at_triplet = *country_ie_len;
572 int end_subband_chan = orig_end_channel;
573
574 /*
575 * We'll deal with padding for the caller unless
576 * it's not immediate and we don't process any channels
577 */
578 if (*country_ie_len == 1) {
579 *country_ie += 1;
580 *country_ie_len -= 1;
581 return orig_end_channel;
582 }
583
584 /* Move to the next triplet and then start search */
585 *country_ie += 3;
586 *country_ie_len -= 3;
587
588 if (!chan_in_band(orig_cur_chan, band))
589 return 0;
590
591 while (*country_ie_len >= 3) {
592 int end_channel = 0;
593 struct ieee80211_country_ie_triplet *triplet =
594 (struct ieee80211_country_ie_triplet *) *country_ie;
595 int cur_channel = 0, next_expected_chan;
596
597 /* means last triplet is completely unrelated to this one */
598 if (triplet->ext.reg_extension_id >=
599 IEEE80211_COUNTRY_EXTENSION_ID) {
600 *country_ie -= 3;
601 *country_ie_len += 3;
602 break;
603 }
604
605 if (triplet->chans.first_channel == 0) {
606 *country_ie += 1;
607 *country_ie_len -= 1;
608 if (*country_ie_len != 0)
609 return 0;
610 break;
611 }
612
613 if (triplet->chans.num_channels == 0)
614 return 0;
615
616 /* Monotonically increasing channel order */
617 if (triplet->chans.first_channel <= end_subband_chan)
618 return 0;
619
620 if (!chan_in_band(triplet->chans.first_channel, band))
621 return 0;
622
623 /* 2 GHz */
624 if (triplet->chans.first_channel <= 14) {
625 end_channel = triplet->chans.first_channel +
626 triplet->chans.num_channels - 1;
627 }
628 else {
629 end_channel = triplet->chans.first_channel +
630 (4 * (triplet->chans.num_channels - 1));
631 }
632
633 if (!chan_in_band(end_channel, band))
634 return 0;
635
636 if (orig_max_power != triplet->chans.max_power) {
637 *country_ie -= 3;
638 *country_ie_len += 3;
639 break;
640 }
641
642 cur_channel = triplet->chans.first_channel;
643
644 /* The key is finding the right next expected channel */
645 if (band == IEEE80211_BAND_2GHZ)
646 next_expected_chan = end_subband_chan + 1;
647 else
648 next_expected_chan = end_subband_chan + 4;
649
650 if (cur_channel != next_expected_chan) {
651 *country_ie -= 3;
652 *country_ie_len += 3;
653 break;
654 }
655
656 end_subband_chan = end_channel;
657
658 /* Move to the next one */
659 *country_ie += 3;
660 *country_ie_len -= 3;
661
662 /*
663 * Padding needs to be dealt with if we processed
664 * some channels.
665 */
666 if (*country_ie_len == 1) {
667 *country_ie += 1;
668 *country_ie_len -= 1;
669 break;
670 }
671
672 /* If seen, the IE is invalid */
673 if (*country_ie_len == 2)
674 return 0;
675 }
676
677 if (end_subband_chan == orig_end_channel) {
678 *country_ie = triplets_start;
679 *country_ie_len = len_at_triplet;
680 return orig_end_channel;
681 }
682
683 return end_subband_chan;
684}
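Editorial walk-through of the last triplet list in the comment above, assuming every triplet carries the same max_power:

/*
 *   [36-36] [40-40] [44-44] ... [64-64] [100-100] ... [140-140]
 *
 *   pass 1: starts at 36; 40 == 36 + 4, 44 == 40 + 4, ..., 64 == 60 + 4,
 *           so the subband grows to 64; 100 != 64 + 4, so the walk stops
 *           and 64 is returned -> one rule for channels 36..64
 *   pass 2: starts at 100 and merges 104, 108, ..., 140 the same way
 *           -> a second rule for channels 100..140
 *
 *   i.e. the whole per-channel list collapses into two regulatory rules.
 */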
685
686/*
457 * Converts a country IE to a regulatory domain. A regulatory domain 687 * Converts a country IE to a regulatory domain. A regulatory domain
458 * structure has a lot of information which the IE doesn't yet have, 688 * structure has a lot of information which the IE doesn't yet have,
459 * so for the other values we use upper max values as we will intersect 689 * so for the other values we use upper max values as we will intersect
460 * with our userspace regulatory agent to get lower bounds. 690 * with our userspace regulatory agent to get lower bounds.
461 */ 691 */
462static struct ieee80211_regdomain *country_ie_2_rd( 692static struct ieee80211_regdomain *country_ie_2_rd(
693 enum ieee80211_band band,
463 u8 *country_ie, 694 u8 *country_ie,
464 u8 country_ie_len, 695 u8 country_ie_len,
465 u32 *checksum) 696 u32 *checksum)
@@ -521,10 +752,29 @@ static struct ieee80211_regdomain *country_ie_2_rd(
521 continue; 752 continue;
522 } 753 }
523 754
755 /*
756 * APs can add padding to make length divisible
757 * by two, required by the spec.
758 */
759 if (triplet->chans.first_channel == 0) {
760 country_ie++;
761 country_ie_len--;
762 /* This is expected to be at the very end only */
763 if (country_ie_len != 0)
764 return NULL;
765 break;
766 }
767
768 if (triplet->chans.num_channels == 0)
769 return NULL;
770
771 if (!chan_in_band(triplet->chans.first_channel, band))
772 return NULL;
773
524 /* 2 GHz */ 774 /* 2 GHz */
525 if (triplet->chans.first_channel <= 14) 775 if (band == IEEE80211_BAND_2GHZ)
526 end_channel = triplet->chans.first_channel + 776 end_channel = triplet->chans.first_channel +
527 triplet->chans.num_channels; 777 triplet->chans.num_channels - 1;
528 else 778 else
529 /* 779 /*
530 * 5 GHz -- For example in country IEs if the first 780 * 5 GHz -- For example in country IEs if the first
@@ -539,6 +789,24 @@ static struct ieee80211_regdomain *country_ie_2_rd(
539 (4 * (triplet->chans.num_channels - 1)); 789 (4 * (triplet->chans.num_channels - 1));
540 790
541 cur_channel = triplet->chans.first_channel; 791 cur_channel = triplet->chans.first_channel;
792
793 /*
794 * Enhancement for APs that send a triplet for every channel
795 * or for whatever reason send triplets with multiple channels
796 * separated when in fact they should be together.
797 */
798 end_channel = max_subband_chan(band,
799 cur_channel,
800 end_channel,
801 triplet->chans.max_power,
802 &country_ie,
803 &country_ie_len);
804 if (!end_channel)
805 return NULL;
806
807 if (!chan_in_band(end_channel, band))
808 return NULL;
809
542 cur_sub_max_channel = end_channel; 810 cur_sub_max_channel = end_channel;
543 811
544 /* Basic sanity check */ 812 /* Basic sanity check */
@@ -569,10 +837,13 @@ static struct ieee80211_regdomain *country_ie_2_rd(
569 837
570 last_sub_max_channel = cur_sub_max_channel; 838 last_sub_max_channel = cur_sub_max_channel;
571 839
572 country_ie += 3;
573 country_ie_len -= 3;
574 num_rules++; 840 num_rules++;
575 841
842 if (country_ie_len >= 3) {
843 country_ie += 3;
844 country_ie_len -= 3;
845 }
846
576 /* 847 /*
577 * Note: this is not a IEEE requirement but 848 * Note: this is not a IEEE requirement but
578 * simply a memory requirement 849 * simply a memory requirement
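The per-band end-channel arithmetic earlier in this function is easy to misread, so as an editorial sanity check (channel values picked for illustration):

/*
 * 2 GHz: first_channel = 1,  num_channels = 11 -> end = 1 + 11 - 1       = 11
 * 5 GHz: first_channel = 36, num_channels = 8  -> end = 36 + 4 * (8 - 1) = 64
 */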
@@ -615,6 +886,12 @@ static struct ieee80211_regdomain *country_ie_2_rd(
615 continue; 886 continue;
616 } 887 }
617 888
889 if (triplet->chans.first_channel == 0) {
890 country_ie++;
891 country_ie_len--;
892 break;
893 }
894
618 reg_rule = &rd->reg_rules[i]; 895 reg_rule = &rd->reg_rules[i];
619 freq_range = &reg_rule->freq_range; 896 freq_range = &reg_rule->freq_range;
620 power_rule = &reg_rule->power_rule; 897 power_rule = &reg_rule->power_rule;
@@ -622,13 +899,20 @@ static struct ieee80211_regdomain *country_ie_2_rd(
622 reg_rule->flags = flags; 899 reg_rule->flags = flags;
623 900
624 /* 2 GHz */ 901 /* 2 GHz */
625 if (triplet->chans.first_channel <= 14) 902 if (band == IEEE80211_BAND_2GHZ)
626 end_channel = triplet->chans.first_channel + 903 end_channel = triplet->chans.first_channel +
627 triplet->chans.num_channels; 904 triplet->chans.num_channels -1;
628 else 905 else
629 end_channel = triplet->chans.first_channel + 906 end_channel = triplet->chans.first_channel +
630 (4 * (triplet->chans.num_channels - 1)); 907 (4 * (triplet->chans.num_channels - 1));
631 908
909 end_channel = max_subband_chan(band,
910 triplet->chans.first_channel,
911 end_channel,
912 triplet->chans.max_power,
913 &country_ie,
914 &country_ie_len);
915
632 /* 916 /*
633 * The +10 is since the regulatory domain expects 917 * The +10 is since the regulatory domain expects
634 * the actual band edge, not the center of freq for 918 * the actual band edge, not the center of freq for
@@ -649,12 +933,15 @@ static struct ieee80211_regdomain *country_ie_2_rd(
649 */ 933 */
650 freq_range->max_bandwidth_khz = MHZ_TO_KHZ(40); 934 freq_range->max_bandwidth_khz = MHZ_TO_KHZ(40);
651 power_rule->max_antenna_gain = DBI_TO_MBI(100); 935 power_rule->max_antenna_gain = DBI_TO_MBI(100);
652 power_rule->max_eirp = DBM_TO_MBM(100); 936 power_rule->max_eirp = DBM_TO_MBM(triplet->chans.max_power);
653 937
654 country_ie += 3;
655 country_ie_len -= 3;
656 i++; 938 i++;
657 939
940 if (country_ie_len >= 3) {
941 country_ie += 3;
942 country_ie_len -= 3;
943 }
944
658 BUG_ON(i > NL80211_MAX_SUPP_REG_RULES); 945 BUG_ON(i > NL80211_MAX_SUPP_REG_RULES);
659 } 946 }
660 947
@@ -950,25 +1237,21 @@ static void handle_channel(struct wiphy *wiphy, enum ieee80211_band band,
950 if (r == -ERANGE && 1237 if (r == -ERANGE &&
951 last_request->initiator == 1238 last_request->initiator ==
952 NL80211_REGDOM_SET_BY_COUNTRY_IE) { 1239 NL80211_REGDOM_SET_BY_COUNTRY_IE) {
953#ifdef CONFIG_CFG80211_REG_DEBUG 1240 REG_DBG_PRINT("cfg80211: Leaving channel %d MHz "
954 printk(KERN_DEBUG "cfg80211: Leaving channel %d MHz "
955 "intact on %s - no rule found in band on " 1241 "intact on %s - no rule found in band on "
956 "Country IE\n", 1242 "Country IE\n",
957 chan->center_freq, wiphy_name(wiphy)); 1243 chan->center_freq, wiphy_name(wiphy));
958#endif
959 } else { 1244 } else {
960 /* 1245 /*
961 * In this case we know the country IE has at least one reg rule 1246 * In this case we know the country IE has at least one reg rule
962 * for the band so we respect its band definitions 1247 * for the band so we respect its band definitions
963 */ 1248 */
964#ifdef CONFIG_CFG80211_REG_DEBUG
965 if (last_request->initiator == 1249 if (last_request->initiator ==
966 NL80211_REGDOM_SET_BY_COUNTRY_IE) 1250 NL80211_REGDOM_SET_BY_COUNTRY_IE)
967 printk(KERN_DEBUG "cfg80211: Disabling " 1251 REG_DBG_PRINT("cfg80211: Disabling "
968 "channel %d MHz on %s due to " 1252 "channel %d MHz on %s due to "
969 "Country IE\n", 1253 "Country IE\n",
970 chan->center_freq, wiphy_name(wiphy)); 1254 chan->center_freq, wiphy_name(wiphy));
971#endif
972 flags |= IEEE80211_CHAN_DISABLED; 1255 flags |= IEEE80211_CHAN_DISABLED;
973 chan->flags = flags; 1256 chan->flags = flags;
974 } 1257 }
@@ -1342,30 +1625,6 @@ void wiphy_apply_custom_regulatory(struct wiphy *wiphy,
1342} 1625}
1343EXPORT_SYMBOL(wiphy_apply_custom_regulatory); 1626EXPORT_SYMBOL(wiphy_apply_custom_regulatory);
1344 1627
1345static int reg_copy_regd(const struct ieee80211_regdomain **dst_regd,
1346 const struct ieee80211_regdomain *src_regd)
1347{
1348 struct ieee80211_regdomain *regd;
1349 int size_of_regd = 0;
1350 unsigned int i;
1351
1352 size_of_regd = sizeof(struct ieee80211_regdomain) +
1353 ((src_regd->n_reg_rules + 1) * sizeof(struct ieee80211_reg_rule));
1354
1355 regd = kzalloc(size_of_regd, GFP_KERNEL);
1356 if (!regd)
1357 return -ENOMEM;
1358
1359 memcpy(regd, src_regd, sizeof(struct ieee80211_regdomain));
1360
1361 for (i = 0; i < src_regd->n_reg_rules; i++)
1362 memcpy(&regd->reg_rules[i], &src_regd->reg_rules[i],
1363 sizeof(struct ieee80211_reg_rule));
1364
1365 *dst_regd = regd;
1366 return 0;
1367}
1368
1369/* 1628/*
1370 * Return value which can be used by ignore_request() to indicate 1629 * Return value which can be used by ignore_request() to indicate
1371 * it has been determined we should intersect two regulatory domains 1630 * it has been determined we should intersect two regulatory domains
@@ -1418,8 +1677,6 @@ static int ignore_request(struct wiphy *wiphy,
1418 return REG_INTERSECT; 1677 return REG_INTERSECT;
1419 case NL80211_REGDOM_SET_BY_DRIVER: 1678 case NL80211_REGDOM_SET_BY_DRIVER:
1420 if (last_request->initiator == NL80211_REGDOM_SET_BY_CORE) { 1679 if (last_request->initiator == NL80211_REGDOM_SET_BY_CORE) {
1421 if (is_old_static_regdom(cfg80211_regdomain))
1422 return 0;
1423 if (regdom_changes(pending_request->alpha2)) 1680 if (regdom_changes(pending_request->alpha2))
1424 return 0; 1681 return 0;
1425 return -EALREADY; 1682 return -EALREADY;
@@ -1456,8 +1713,7 @@ static int ignore_request(struct wiphy *wiphy,
1456 return -EAGAIN; 1713 return -EAGAIN;
1457 } 1714 }
1458 1715
1459 if (!is_old_static_regdom(cfg80211_regdomain) && 1716 if (!regdom_changes(pending_request->alpha2))
1460 !regdom_changes(pending_request->alpha2))
1461 return -EALREADY; 1717 return -EALREADY;
1462 1718
1463 return 0; 1719 return 0;
@@ -1758,8 +2014,9 @@ static bool reg_same_country_ie_hint(struct wiphy *wiphy,
1758 * therefore cannot iterate over the rdev list here. 2014 * therefore cannot iterate over the rdev list here.
1759 */ 2015 */
1760void regulatory_hint_11d(struct wiphy *wiphy, 2016void regulatory_hint_11d(struct wiphy *wiphy,
1761 u8 *country_ie, 2017 enum ieee80211_band band,
1762 u8 country_ie_len) 2018 u8 *country_ie,
2019 u8 country_ie_len)
1763{ 2020{
1764 struct ieee80211_regdomain *rd = NULL; 2021 struct ieee80211_regdomain *rd = NULL;
1765 char alpha2[2]; 2022 char alpha2[2];
@@ -1805,9 +2062,11 @@ void regulatory_hint_11d(struct wiphy *wiphy,
1805 wiphy_idx_valid(last_request->wiphy_idx))) 2062 wiphy_idx_valid(last_request->wiphy_idx)))
1806 goto out; 2063 goto out;
1807 2064
1808 rd = country_ie_2_rd(country_ie, country_ie_len, &checksum); 2065 rd = country_ie_2_rd(band, country_ie, country_ie_len, &checksum);
1809 if (!rd) 2066 if (!rd) {
2067 REG_DBG_PRINT("cfg80211: Ignoring bogus country IE\n");
1810 goto out; 2068 goto out;
2069 }
1811 2070
1812 /* 2071 /*
1813 * This will not happen right now but we leave it here for the 2072 * This will not happen right now but we leave it here for the
@@ -1875,13 +2134,12 @@ int regulatory_hint_found_beacon(struct wiphy *wiphy,
1875 if (!reg_beacon) 2134 if (!reg_beacon)
1876 return -ENOMEM; 2135 return -ENOMEM;
1877 2136
1878#ifdef CONFIG_CFG80211_REG_DEBUG 2137 REG_DBG_PRINT("cfg80211: Found new beacon on "
1879 printk(KERN_DEBUG "cfg80211: Found new beacon on " 2138 "frequency: %d MHz (Ch %d) on %s\n",
1880 "frequency: %d MHz (Ch %d) on %s\n", 2139 beacon_chan->center_freq,
1881 beacon_chan->center_freq, 2140 ieee80211_frequency_to_channel(beacon_chan->center_freq),
1882 ieee80211_frequency_to_channel(beacon_chan->center_freq), 2141 wiphy_name(wiphy));
1883 wiphy_name(wiphy)); 2142
1884#endif
1885 memcpy(&reg_beacon->chan, beacon_chan, 2143 memcpy(&reg_beacon->chan, beacon_chan,
1886 sizeof(struct ieee80211_channel)); 2144 sizeof(struct ieee80211_channel));
1887 2145
@@ -2039,8 +2297,7 @@ static int __set_regdom(const struct ieee80211_regdomain *rd)
2039 * If someone else asked us to change the rd lets only bother 2297 * If someone else asked us to change the rd lets only bother
2040 * checking if the alpha2 changes if CRDA was already called 2298 * checking if the alpha2 changes if CRDA was already called
2041 */ 2299 */
2042 if (!is_old_static_regdom(cfg80211_regdomain) && 2300 if (!regdom_changes(rd->alpha2))
2043 !regdom_changes(rd->alpha2))
2044 return -EINVAL; 2301 return -EINVAL;
2045 } 2302 }
2046 2303
@@ -2239,15 +2496,8 @@ int regulatory_init(void)
2239 spin_lock_init(&reg_requests_lock); 2496 spin_lock_init(&reg_requests_lock);
2240 spin_lock_init(&reg_pending_beacons_lock); 2497 spin_lock_init(&reg_pending_beacons_lock);
2241 2498
2242#ifdef CONFIG_WIRELESS_OLD_REGULATORY
2243 cfg80211_regdomain = static_regdom(ieee80211_regdom);
2244
2245 printk(KERN_INFO "cfg80211: Using static regulatory domain info\n");
2246 print_regdomain_info(cfg80211_regdomain);
2247#else
2248 cfg80211_regdomain = cfg80211_world_regdom; 2499 cfg80211_regdomain = cfg80211_world_regdom;
2249 2500
2250#endif
2251 /* We always try to get an update for the static regdomain */ 2501 /* We always try to get an update for the static regdomain */
2252 err = regulatory_hint_core(cfg80211_regdomain->alpha2); 2502 err = regulatory_hint_core(cfg80211_regdomain->alpha2);
2253 if (err) { 2503 if (err) {
diff --git a/net/wireless/reg.h b/net/wireless/reg.h
index 3362c7c069b..3018508226a 100644
--- a/net/wireless/reg.h
+++ b/net/wireless/reg.h
@@ -41,14 +41,25 @@ int regulatory_hint_found_beacon(struct wiphy *wiphy,
41 * regulatory_hint_11d - hints a country IE as a regulatory domain 41 * regulatory_hint_11d - hints a country IE as a regulatory domain
42 * @wiphy: the wireless device giving the hint (used only for reporting 42 * @wiphy: the wireless device giving the hint (used only for reporting
43 * conflicts) 43 * conflicts)
44 * @band: the band on which the country IE was received on. This determines
45 * the band we'll process the country IE channel triplets for.
44 * @country_ie: pointer to the country IE 46 * @country_ie: pointer to the country IE
45 * @country_ie_len: length of the country IE 47 * @country_ie_len: length of the country IE
46 * 48 *
47 * We will intersect the rd with the what CRDA tells us should apply 49 * We will intersect the rd with the what CRDA tells us should apply
48 * for the alpha2 this country IE belongs to, this prevents APs from 50 * for the alpha2 this country IE belongs to, this prevents APs from
49 * sending us incorrect or outdated information against a country. 51 * sending us incorrect or outdated information against a country.
52 *
53 * The AP is expected to provide Country IE channel triplets for the
54 * band it is on. It is technically possible for APs to send country
55 * IE channel triplets even for channels outside of the band they are
56 * in, but to do so they would have to use the regulatory extension
57 * in combination with a triplet, and that behaviour is currently
58 * not observed. For this reason, if a triplet is seen with channel
59 * information for a band the BSS is not present on, it will be ignored.
50 */ 60 */
51void regulatory_hint_11d(struct wiphy *wiphy, 61void regulatory_hint_11d(struct wiphy *wiphy,
62 enum ieee80211_band band,
52 u8 *country_ie, 63 u8 *country_ie,
53 u8 country_ie_len); 64 u8 country_ie_len);
54 65
diff --git a/net/wireless/regdb.h b/net/wireless/regdb.h
new file mode 100644
index 00000000000..818222c9251
--- /dev/null
+++ b/net/wireless/regdb.h
@@ -0,0 +1,7 @@
1#ifndef __REGDB_H__
2#define __REGDB_H__
3
4extern const struct ieee80211_regdomain *reg_regdb[];
5extern int reg_regdb_size;
6
7#endif /* __REGDB_H__ */
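The reg_regdb[] table declared here is not part of this diff; with CONFIG_CFG80211_INTERNAL_REGDB it is expected to be generated at build time from the wireless regulatory database. Purely as an editorial illustration of the shape such a generated file could take (the rules below are invented, not the real database):

#include <linux/kernel.h>
#include <linux/nl80211.h>
#include <net/cfg80211.h>
#include "regdb.h"

static const struct ieee80211_regdomain regdom_00 = {
	.alpha2 = "00",
	.n_reg_rules = 2,
	.reg_rules = {
		/* 2.4 GHz, channels 1..11 */
		REG_RULE(2412 - 10, 2462 + 10, 40, 6, 20, 0),
		/* 5 GHz, channels 52..64, DFS */
		REG_RULE(5260 - 10, 5320 + 10, 40, 6, 20, NL80211_RRF_DFS),
	},
};

const struct ieee80211_regdomain *reg_regdb[] = {
	&regdom_00,
};

int reg_regdb_size = ARRAY_SIZE(reg_regdb);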
diff --git a/net/wireless/scan.c b/net/wireless/scan.c
index 0c2cbbebca9..06b0231ee5e 100644
--- a/net/wireless/scan.c
+++ b/net/wireless/scan.c
@@ -100,8 +100,10 @@ static void bss_release(struct kref *ref)
100 if (bss->pub.free_priv) 100 if (bss->pub.free_priv)
101 bss->pub.free_priv(&bss->pub); 101 bss->pub.free_priv(&bss->pub);
102 102
103 if (bss->ies_allocated) 103 if (bss->beacon_ies_allocated)
104 kfree(bss->pub.information_elements); 104 kfree(bss->pub.beacon_ies);
105 if (bss->proberesp_ies_allocated)
106 kfree(bss->pub.proberesp_ies);
105 107
106 BUG_ON(atomic_read(&bss->hold)); 108 BUG_ON(atomic_read(&bss->hold));
107 109
@@ -375,8 +377,7 @@ rb_find_bss(struct cfg80211_registered_device *dev,
375 377
376static struct cfg80211_internal_bss * 378static struct cfg80211_internal_bss *
377cfg80211_bss_update(struct cfg80211_registered_device *dev, 379cfg80211_bss_update(struct cfg80211_registered_device *dev,
378 struct cfg80211_internal_bss *res, 380 struct cfg80211_internal_bss *res)
379 bool overwrite)
380{ 381{
381 struct cfg80211_internal_bss *found = NULL; 382 struct cfg80211_internal_bss *found = NULL;
382 const u8 *meshid, *meshcfg; 383 const u8 *meshid, *meshcfg;
@@ -418,28 +419,64 @@ cfg80211_bss_update(struct cfg80211_registered_device *dev,
418 found->pub.capability = res->pub.capability; 419 found->pub.capability = res->pub.capability;
419 found->ts = res->ts; 420 found->ts = res->ts;
420 421
421 /* overwrite IEs */ 422 /* Update IEs */
422 if (overwrite) { 423 if (res->pub.proberesp_ies) {
423 size_t used = dev->wiphy.bss_priv_size + sizeof(*res); 424 size_t used = dev->wiphy.bss_priv_size + sizeof(*res);
424 size_t ielen = res->pub.len_information_elements; 425 size_t ielen = res->pub.len_proberesp_ies;
426
427 if (found->pub.proberesp_ies &&
428 !found->proberesp_ies_allocated &&
429 ksize(found) >= used + ielen) {
430 memcpy(found->pub.proberesp_ies,
431 res->pub.proberesp_ies, ielen);
432 found->pub.len_proberesp_ies = ielen;
433 } else {
434 u8 *ies = found->pub.proberesp_ies;
435
436 if (found->proberesp_ies_allocated)
437 ies = krealloc(ies, ielen, GFP_ATOMIC);
438 else
439 ies = kmalloc(ielen, GFP_ATOMIC);
440
441 if (ies) {
442 memcpy(ies, res->pub.proberesp_ies,
443 ielen);
444 found->proberesp_ies_allocated = true;
445 found->pub.proberesp_ies = ies;
446 found->pub.len_proberesp_ies = ielen;
447 }
448 }
425 449
426 if (!found->ies_allocated && ksize(found) >= used + ielen) { 450 /* Override possible earlier Beacon frame IEs */
427 memcpy(found->pub.information_elements, 451 found->pub.information_elements =
428 res->pub.information_elements, ielen); 452 found->pub.proberesp_ies;
429 found->pub.len_information_elements = ielen; 453 found->pub.len_information_elements =
454 found->pub.len_proberesp_ies;
455 }
456 if (res->pub.beacon_ies) {
457 size_t used = dev->wiphy.bss_priv_size + sizeof(*res);
458 size_t ielen = res->pub.len_beacon_ies;
459
460 if (found->pub.beacon_ies &&
461 !found->beacon_ies_allocated &&
462 ksize(found) >= used + ielen) {
463 memcpy(found->pub.beacon_ies,
464 res->pub.beacon_ies, ielen);
465 found->pub.len_beacon_ies = ielen;
430 } else { 466 } else {
431 u8 *ies = found->pub.information_elements; 467 u8 *ies = found->pub.beacon_ies;
432 468
433 if (found->ies_allocated) 469 if (found->beacon_ies_allocated)
434 ies = krealloc(ies, ielen, GFP_ATOMIC); 470 ies = krealloc(ies, ielen, GFP_ATOMIC);
435 else 471 else
436 ies = kmalloc(ielen, GFP_ATOMIC); 472 ies = kmalloc(ielen, GFP_ATOMIC);
437 473
438 if (ies) { 474 if (ies) {
439 memcpy(ies, res->pub.information_elements, ielen); 475 memcpy(ies, res->pub.beacon_ies,
440 found->ies_allocated = true; 476 ielen);
441 found->pub.information_elements = ies; 477 found->beacon_ies_allocated = true;
442 found->pub.len_information_elements = ielen; 478 found->pub.beacon_ies = ies;
479 found->pub.len_beacon_ies = ielen;
443 } 480 }
444 } 481 }
445 } 482 }
@@ -489,14 +526,26 @@ cfg80211_inform_bss(struct wiphy *wiphy,
489 res->pub.tsf = timestamp; 526 res->pub.tsf = timestamp;
490 res->pub.beacon_interval = beacon_interval; 527 res->pub.beacon_interval = beacon_interval;
491 res->pub.capability = capability; 528 res->pub.capability = capability;
492 /* point to after the private area */ 529 /*
493 res->pub.information_elements = (u8 *)res + sizeof(*res) + privsz; 530 * Since we do not know here whether the IEs are from a Beacon or Probe
494 memcpy(res->pub.information_elements, ie, ielen); 531 * Response frame, we need to pick one of the options and only use it
495 res->pub.len_information_elements = ielen; 532 * with the driver that does not provide the full Beacon/Probe Response
533 * frame. Use Beacon frame pointer to avoid indicating that this should
534 * override the information_elements pointer should we have received an
535 * earlier indication of Probe Response data.
536 *
537 * The initial buffer for the IEs is allocated with the BSS entry and
538 * is located after the private area.
539 */
540 res->pub.beacon_ies = (u8 *)res + sizeof(*res) + privsz;
541 memcpy(res->pub.beacon_ies, ie, ielen);
542 res->pub.len_beacon_ies = ielen;
543 res->pub.information_elements = res->pub.beacon_ies;
544 res->pub.len_information_elements = res->pub.len_beacon_ies;
496 545
497 kref_init(&res->ref); 546 kref_init(&res->ref);
498 547
499 res = cfg80211_bss_update(wiphy_to_dev(wiphy), res, 0); 548 res = cfg80211_bss_update(wiphy_to_dev(wiphy), res);
500 if (!res) 549 if (!res)
501 return NULL; 550 return NULL;
502 551
@@ -517,7 +566,6 @@ cfg80211_inform_bss_frame(struct wiphy *wiphy,
517 struct cfg80211_internal_bss *res; 566 struct cfg80211_internal_bss *res;
518 size_t ielen = len - offsetof(struct ieee80211_mgmt, 567 size_t ielen = len - offsetof(struct ieee80211_mgmt,
519 u.probe_resp.variable); 568 u.probe_resp.variable);
520 bool overwrite;
521 size_t privsz = wiphy->bss_priv_size; 569 size_t privsz = wiphy->bss_priv_size;
522 570
523 if (WARN_ON(wiphy->signal_type == NL80211_BSS_SIGNAL_UNSPEC && 571 if (WARN_ON(wiphy->signal_type == NL80211_BSS_SIGNAL_UNSPEC &&
@@ -538,16 +586,28 @@ cfg80211_inform_bss_frame(struct wiphy *wiphy,
538 res->pub.tsf = le64_to_cpu(mgmt->u.probe_resp.timestamp); 586 res->pub.tsf = le64_to_cpu(mgmt->u.probe_resp.timestamp);
539 res->pub.beacon_interval = le16_to_cpu(mgmt->u.probe_resp.beacon_int); 587 res->pub.beacon_interval = le16_to_cpu(mgmt->u.probe_resp.beacon_int);
540 res->pub.capability = le16_to_cpu(mgmt->u.probe_resp.capab_info); 588 res->pub.capability = le16_to_cpu(mgmt->u.probe_resp.capab_info);
541 /* point to after the private area */ 589 /*
542 res->pub.information_elements = (u8 *)res + sizeof(*res) + privsz; 590 * The initial buffer for the IEs is allocated with the BSS entry and
543 memcpy(res->pub.information_elements, mgmt->u.probe_resp.variable, ielen); 591 * is located after the private area.
544 res->pub.len_information_elements = ielen; 592 */
593 if (ieee80211_is_probe_resp(mgmt->frame_control)) {
594 res->pub.proberesp_ies = (u8 *) res + sizeof(*res) + privsz;
595 memcpy(res->pub.proberesp_ies, mgmt->u.probe_resp.variable,
596 ielen);
597 res->pub.len_proberesp_ies = ielen;
598 res->pub.information_elements = res->pub.proberesp_ies;
599 res->pub.len_information_elements = res->pub.len_proberesp_ies;
600 } else {
601 res->pub.beacon_ies = (u8 *) res + sizeof(*res) + privsz;
602 memcpy(res->pub.beacon_ies, mgmt->u.beacon.variable, ielen);
603 res->pub.len_beacon_ies = ielen;
604 res->pub.information_elements = res->pub.beacon_ies;
605 res->pub.len_information_elements = res->pub.len_beacon_ies;
606 }
545 607
546 kref_init(&res->ref); 608 kref_init(&res->ref);
547 609
548 overwrite = ieee80211_is_probe_resp(mgmt->frame_control); 610 res = cfg80211_bss_update(wiphy_to_dev(wiphy), res);
549
550 res = cfg80211_bss_update(wiphy_to_dev(wiphy), res, overwrite);
551 if (!res) 611 if (!res)
552 return NULL; 612 return NULL;
553 613
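Editorial aside on consuming the split storage: code that wants the most complete IE set would prefer the Probe Response data and fall back to the Beacon data, which is exactly what the information_elements compatibility pointer encodes above. A hedged sketch (the helper name is invented):

static const u8 *bss_preferred_ies(struct cfg80211_bss *bss, size_t *len)
{
	if (bss->proberesp_ies) {
		*len = bss->len_proberesp_ies;
		return bss->proberesp_ies;
	}

	*len = bss->len_beacon_ies;
	return bss->beacon_ies;
}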
diff --git a/net/wireless/sme.c b/net/wireless/sme.c
index dc0fc4989d5..745c37e7992 100644
--- a/net/wireless/sme.c
+++ b/net/wireless/sme.c
@@ -454,6 +454,7 @@ void __cfg80211_connect_result(struct net_device *dev, const u8 *bssid,
454 * - and country_ie[1] which is the IE length 454 * - and country_ie[1] which is the IE length
455 */ 455 */
456 regulatory_hint_11d(wdev->wiphy, 456 regulatory_hint_11d(wdev->wiphy,
457 bss->channel->band,
457 country_ie + 2, 458 country_ie + 2,
458 country_ie[1]); 459 country_ie[1]);
459} 460}
diff --git a/net/wireless/util.c b/net/wireless/util.c
index 59361fdcb5d..23557c1d0a9 100644
--- a/net/wireless/util.c
+++ b/net/wireless/util.c
@@ -285,7 +285,7 @@ static int ieee80211_get_mesh_hdrlen(struct ieee80211s_hdr *meshhdr)
285 } 285 }
286} 286}
287 287
288int ieee80211_data_to_8023(struct sk_buff *skb, u8 *addr, 288int ieee80211_data_to_8023(struct sk_buff *skb, const u8 *addr,
289 enum nl80211_iftype iftype) 289 enum nl80211_iftype iftype)
290{ 290{
291 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; 291 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
@@ -383,7 +383,7 @@ int ieee80211_data_to_8023(struct sk_buff *skb, u8 *addr,
383} 383}
384EXPORT_SYMBOL(ieee80211_data_to_8023); 384EXPORT_SYMBOL(ieee80211_data_to_8023);
385 385
386int ieee80211_data_from_8023(struct sk_buff *skb, u8 *addr, 386int ieee80211_data_from_8023(struct sk_buff *skb, const u8 *addr,
387 enum nl80211_iftype iftype, u8 *bssid, bool qos) 387 enum nl80211_iftype iftype, u8 *bssid, bool qos)
388{ 388{
389 struct ieee80211_hdr hdr; 389 struct ieee80211_hdr hdr;
@@ -497,6 +497,101 @@ int ieee80211_data_from_8023(struct sk_buff *skb, u8 *addr,
497} 497}
498EXPORT_SYMBOL(ieee80211_data_from_8023); 498EXPORT_SYMBOL(ieee80211_data_from_8023);
499 499
500
501void ieee80211_amsdu_to_8023s(struct sk_buff *skb, struct sk_buff_head *list,
502 const u8 *addr, enum nl80211_iftype iftype,
503 const unsigned int extra_headroom)
504{
505 struct sk_buff *frame = NULL;
506 u16 ethertype;
507 u8 *payload;
508 const struct ethhdr *eth;
509 int remaining, err;
510 u8 dst[ETH_ALEN], src[ETH_ALEN];
511
512 err = ieee80211_data_to_8023(skb, addr, iftype);
513 if (err)
514 goto out;
515
516 /* skip the wrapping header */
517 eth = (struct ethhdr *) skb_pull(skb, sizeof(struct ethhdr));
518 if (!eth)
519 goto out;
520
521 while (skb != frame) {
522 u8 padding;
523 __be16 len = eth->h_proto;
524 unsigned int subframe_len = sizeof(struct ethhdr) + ntohs(len);
525
526 remaining = skb->len;
527 memcpy(dst, eth->h_dest, ETH_ALEN);
528 memcpy(src, eth->h_source, ETH_ALEN);
529
530 padding = (4 - subframe_len) & 0x3;
531 /* the last MSDU has no padding */
532 if (subframe_len > remaining)
533 goto purge;
534
535 skb_pull(skb, sizeof(struct ethhdr));
536 /* reuse skb for the last subframe */
537 if (remaining <= subframe_len + padding)
538 frame = skb;
539 else {
540 unsigned int hlen = ALIGN(extra_headroom, 4);
541 /*
542 * Allocate and reserve two bytes more for payload
543 * alignment since sizeof(struct ethhdr) is 14.
544 */
545 frame = dev_alloc_skb(hlen + subframe_len + 2);
546 if (!frame)
547 goto purge;
548
549 skb_reserve(frame, hlen + sizeof(struct ethhdr) + 2);
550 memcpy(skb_put(frame, ntohs(len)), skb->data,
551 ntohs(len));
552
553 eth = (struct ethhdr *)skb_pull(skb, ntohs(len) +
554 padding);
555 if (!eth) {
556 dev_kfree_skb(frame);
557 goto purge;
558 }
559 }
560
561 skb_reset_network_header(frame);
562 frame->dev = skb->dev;
563 frame->priority = skb->priority;
564
565 payload = frame->data;
566 ethertype = (payload[6] << 8) | payload[7];
567
568 if (likely((compare_ether_addr(payload, rfc1042_header) == 0 &&
569 ethertype != ETH_P_AARP && ethertype != ETH_P_IPX) ||
570 compare_ether_addr(payload,
571 bridge_tunnel_header) == 0)) {
572 /* remove RFC1042 or Bridge-Tunnel
573 * encapsulation and replace EtherType */
574 skb_pull(frame, 6);
575 memcpy(skb_push(frame, ETH_ALEN), src, ETH_ALEN);
576 memcpy(skb_push(frame, ETH_ALEN), dst, ETH_ALEN);
577 } else {
578 memcpy(skb_push(frame, sizeof(__be16)), &len,
579 sizeof(__be16));
580 memcpy(skb_push(frame, ETH_ALEN), src, ETH_ALEN);
581 memcpy(skb_push(frame, ETH_ALEN), dst, ETH_ALEN);
582 }
583 __skb_queue_tail(list, frame);
584 }
585
586 return;
587
588 purge:
589 __skb_queue_purge(list);
590 out:
591 dev_kfree_skb(skb);
592}
593EXPORT_SYMBOL(ieee80211_amsdu_to_8023s);
594
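A minimal sketch of how a receive path could use the new A-MSDU helper (editorial; deliver_8023() stands in for whatever the driver does with each decapsulated frame):

static void rx_amsdu_example(struct sk_buff *skb, struct net_device *dev)
{
	struct sk_buff_head frames;
	struct sk_buff *frame;

	__skb_queue_head_init(&frames);

	/* consumes 'skb' and queues one 802.3 frame per A-MSDU subframe */
	ieee80211_amsdu_to_8023s(skb, &frames, dev->dev_addr,
				 NL80211_IFTYPE_STATION, 0);

	while ((frame = __skb_dequeue(&frames)) != NULL)
		deliver_8023(frame, dev);	/* hypothetical driver hook */
}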
500/* Given a data frame determine the 802.1p/1d tag to use. */ 595/* Given a data frame determine the 802.1p/1d tag to use. */
501unsigned int cfg80211_classify8021d(struct sk_buff *skb) 596unsigned int cfg80211_classify8021d(struct sk_buff *skb)
502{ 597{
@@ -720,3 +815,36 @@ int cfg80211_change_iface(struct cfg80211_registered_device *rdev,
720 815
721 return err; 816 return err;
722} 817}
818
819u16 cfg80211_calculate_bitrate(struct rate_info *rate)
820{
821 int modulation, streams, bitrate;
822
823 if (!(rate->flags & RATE_INFO_FLAGS_MCS))
824 return rate->legacy;
825
826 /* the formula below only works for MCS values smaller than 32 */
827 if (rate->mcs >= 32)
828 return 0;
829
830 modulation = rate->mcs & 7;
831 streams = (rate->mcs >> 3) + 1;
832
833 bitrate = (rate->flags & RATE_INFO_FLAGS_40_MHZ_WIDTH) ?
834 13500000 : 6500000;
835
836 if (modulation < 4)
837 bitrate *= (modulation + 1);
838 else if (modulation == 4)
839 bitrate *= (modulation + 2);
840 else
841 bitrate *= (modulation + 3);
842
843 bitrate *= streams;
844
845 if (rate->flags & RATE_INFO_FLAGS_SHORT_GI)
846 bitrate = (bitrate / 9) * 10;
847
848 /* do NOT round down here */
849 return (bitrate + 50000) / 100000;
850}
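A worked example of the MCS arithmetic above (editorial; MCS 15 is modulation index 7 over two spatial streams):

/*
 * cfg80211_calculate_bitrate() for MCS 15, 40 MHz, short GI:
 *   modulation = 15 & 7        = 7
 *   streams    = (15 >> 3) + 1 = 2
 *   bitrate    = 13500000 * (7 + 3)     = 135000000
 *   bitrate   *= streams                = 270000000
 *   short GI   : 270000000 / 9 * 10     = 300000000
 *   return (300000000 + 50000) / 100000 = 3000    (i.e. 300.0 Mbit/s)
 */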
diff --git a/net/wireless/wext-compat.c b/net/wireless/wext-compat.c
index 54face3d442..966d2f01bea 100644
--- a/net/wireless/wext-compat.c
+++ b/net/wireless/wext-compat.c
@@ -1204,21 +1204,47 @@ int cfg80211_wext_siwrate(struct net_device *dev,
1204 struct wireless_dev *wdev = dev->ieee80211_ptr; 1204 struct wireless_dev *wdev = dev->ieee80211_ptr;
1205 struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy); 1205 struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
1206 struct cfg80211_bitrate_mask mask; 1206 struct cfg80211_bitrate_mask mask;
1207 u32 fixed, maxrate;
1208 struct ieee80211_supported_band *sband;
1209 int band, ridx;
1210 bool match = false;
1207 1211
1208 if (!rdev->ops->set_bitrate_mask) 1212 if (!rdev->ops->set_bitrate_mask)
1209 return -EOPNOTSUPP; 1213 return -EOPNOTSUPP;
1210 1214
1211 mask.fixed = 0; 1215 memset(&mask, 0, sizeof(mask));
1212 mask.maxrate = 0; 1216 fixed = 0;
1217 maxrate = 0;
1213 1218
1214 if (rate->value < 0) { 1219 if (rate->value < 0) {
1215 /* nothing */ 1220 /* nothing */
1216 } else if (rate->fixed) { 1221 } else if (rate->fixed) {
1217 mask.fixed = rate->value / 1000; /* kbps */ 1222 fixed = rate->value / 100000;
1218 } else { 1223 } else {
1219 mask.maxrate = rate->value / 1000; /* kbps */ 1224 maxrate = rate->value / 100000;
1220 } 1225 }
1221 1226
1227 for (band = 0; band < IEEE80211_NUM_BANDS; band++) {
1228 sband = wdev->wiphy->bands[band];
1229 if (sband == NULL)
1230 continue;
1231 for (ridx = 0; ridx < sband->n_bitrates; ridx++) {
1232 struct ieee80211_rate *srate = &sband->bitrates[ridx];
1233 if (fixed == srate->bitrate) {
1234 mask.control[band].legacy = 1 << ridx;
1235 match = true;
1236 break;
1237 }
1238 if (srate->bitrate <= maxrate) {
1239 mask.control[band].legacy |= 1 << ridx;
1240 match = true;
1241 }
1242 }
1243 }
1244
1245 if (!match)
1246 return -EINVAL;
1247
1222 return rdev->ops->set_bitrate_mask(wdev->wiphy, dev, NULL, &mask); 1248 return rdev->ops->set_bitrate_mask(wdev->wiphy, dev, NULL, &mask);
1223} 1249}
1224EXPORT_SYMBOL_GPL(cfg80211_wext_siwrate); 1250EXPORT_SYMBOL_GPL(cfg80211_wext_siwrate);
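Editorial note on the unit handling in siwrate above: WEXT passes rate->value in bit/s while ieee80211_rate.bitrate is in units of 100 kbit/s, so dividing by 100000 puts both on the same scale before the comparison:

/*
 * rate->value = 54000000 (a fixed 54 Mbit/s request)
 *   fixed = 54000000 / 100000 = 540
 *   the 54 Mbit/s entry has bitrate == 540, so exactly one bit
 *   is set in that band's mask.control[band].legacy
 */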
@@ -1257,10 +1283,7 @@ int cfg80211_wext_giwrate(struct net_device *dev,
1257 if (!(sinfo.filled & STATION_INFO_TX_BITRATE)) 1283 if (!(sinfo.filled & STATION_INFO_TX_BITRATE))
1258 return -EOPNOTSUPP; 1284 return -EOPNOTSUPP;
1259 1285
1260 rate->value = 0; 1286 rate->value = 100000 * cfg80211_calculate_bitrate(&sinfo.txrate);
1261
1262 if (!(sinfo.txrate.flags & RATE_INFO_FLAGS_MCS))
1263 rate->value = 100000 * sinfo.txrate.legacy;
1264 1287
1265 return 0; 1288 return 0;
1266} 1289}
diff --git a/net/wireless/wext-proc.c b/net/wireless/wext-proc.c
index 273a7f77c83..8bafa31fa9f 100644
--- a/net/wireless/wext-proc.c
+++ b/net/wireless/wext-proc.c
@@ -140,7 +140,7 @@ static const struct file_operations wireless_seq_fops = {
140 .release = seq_release_net, 140 .release = seq_release_net,
141}; 141};
142 142
143int wext_proc_init(struct net *net) 143int __net_init wext_proc_init(struct net *net)
144{ 144{
145 /* Create /proc/net/wireless entry */ 145 /* Create /proc/net/wireless entry */
146 if (!proc_net_fops_create(net, "wireless", S_IRUGO, &wireless_seq_fops)) 146 if (!proc_net_fops_create(net, "wireless", S_IRUGO, &wireless_seq_fops))
@@ -149,7 +149,7 @@ int wext_proc_init(struct net *net)
149 return 0; 149 return 0;
150} 150}
151 151
152void wext_proc_exit(struct net *net) 152void __net_exit wext_proc_exit(struct net *net)
153{ 153{
154 proc_net_remove(net, "wireless"); 154 proc_net_remove(net, "wireless");
155} 155}
diff --git a/net/xfrm/xfrm_proc.c b/net/xfrm/xfrm_proc.c
index fef8db553e8..c083a4e4e79 100644
--- a/net/xfrm/xfrm_proc.c
+++ b/net/xfrm/xfrm_proc.c
@@ -15,7 +15,7 @@
15#include <net/snmp.h> 15#include <net/snmp.h>
16#include <net/xfrm.h> 16#include <net/xfrm.h>
17 17
18static struct snmp_mib xfrm_mib_list[] = { 18static const struct snmp_mib xfrm_mib_list[] = {
19 SNMP_MIB_ITEM("XfrmInError", LINUX_MIB_XFRMINERROR), 19 SNMP_MIB_ITEM("XfrmInError", LINUX_MIB_XFRMINERROR),
20 SNMP_MIB_ITEM("XfrmInBufferError", LINUX_MIB_XFRMINBUFFERERROR), 20 SNMP_MIB_ITEM("XfrmInBufferError", LINUX_MIB_XFRMINBUFFERERROR),
21 SNMP_MIB_ITEM("XfrmInHdrError", LINUX_MIB_XFRMINHDRERROR), 21 SNMP_MIB_ITEM("XfrmInHdrError", LINUX_MIB_XFRMINHDRERROR),
diff --git a/net/xfrm/xfrm_sysctl.c b/net/xfrm/xfrm_sysctl.c
index 2e221f2cad7..2c4d6cdcba4 100644
--- a/net/xfrm/xfrm_sysctl.c
+++ b/net/xfrm/xfrm_sysctl.c
@@ -2,7 +2,7 @@
2#include <net/net_namespace.h> 2#include <net/net_namespace.h>
3#include <net/xfrm.h> 3#include <net/xfrm.h>
4 4
5static void __xfrm_sysctl_init(struct net *net) 5static void __net_init __xfrm_sysctl_init(struct net *net)
6{ 6{
7 net->xfrm.sysctl_aevent_etime = XFRM_AE_ETIME; 7 net->xfrm.sysctl_aevent_etime = XFRM_AE_ETIME;
8 net->xfrm.sysctl_aevent_rseqth = XFRM_AE_SEQT_SIZE; 8 net->xfrm.sysctl_aevent_rseqth = XFRM_AE_SEQT_SIZE;
@@ -64,7 +64,7 @@ out_kmemdup:
64 return -ENOMEM; 64 return -ENOMEM;
65} 65}
66 66
67void xfrm_sysctl_fini(struct net *net) 67void __net_exit xfrm_sysctl_fini(struct net *net)
68{ 68{
69 struct ctl_table *table; 69 struct ctl_table *table;
70 70